Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/Kconfig.debug | 1
-rw-r--r-- drivers/gpu/drm/Makefile | 6
-rw-r--r-- drivers/gpu/drm/adp/adp-mipi.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aldebaran.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 491
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 58
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 151
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 262
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c | 96
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 175
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 134
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 55
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 126
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 98
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 88
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 59
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 76
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 53
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 236
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 313
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 70
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 276
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 184
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 56
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 70
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 55
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 123
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 200
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c | 118
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 117
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 59
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 67
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 102
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 429
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 77
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 59
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c | 239
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/Makefile | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 508
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 67
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 181
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h | 149
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 265
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 192
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dsc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 59
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 316
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_status.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 47
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_detection.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_factory.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.c | 124
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 287
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 39
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/os_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 56
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c | 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 367
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 15
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 72
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 118
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h | 7
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 121
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h | 16
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 39
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 22
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 37
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 37
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 57
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 43
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 106
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 41
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 59
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 105
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 60
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 90
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 41
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 3
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h | 1
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 23
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 12
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.c | 12
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.h | 4
-rw-r--r-- drivers/gpu/drm/armada/armada_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/ast/Makefile | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_2000.c | 149
-rw-r--r-- drivers/gpu/drm/ast/ast_2100.c | 348
-rw-r--r-- drivers/gpu/drm/ast/ast_2300.c | 1328
-rw-r--r-- drivers/gpu/drm/ast/ast_2500.c | 569
-rw-r--r-- drivers/gpu/drm/ast/ast_2600.c | 44
-rw-r--r-- drivers/gpu/drm/ast/ast_dram_tables.h | 207
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 17
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 69
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c | 2027
-rw-r--r-- drivers/gpu/drm/ast/ast_post.h | 50
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511.h | 53
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 77
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_cec.c | 57
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 360
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7533.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 41
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 3
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/aux-bridge.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/aux-hpd-bridge.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c | 72
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/chipone-icn6211.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/chrontel-ch7033.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/cros-ec-anx7688.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/display-connector.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/fsl-ldb.c | 7
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c | 27
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6263.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6505.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/ite-it66121.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt8912b.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9211.c | 7
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611.c | 23
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/lvds-codec.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 16
-rw-r--r-- drivers/gpu/drm/bridge/microchip-lvds.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/nwl-dsi.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/nxp-ptn3460.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/panel.c | 13
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8622.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8640.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/samsung-dsim.c | 70
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/sii9234.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/sil-sii8620.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/simple-bridge.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 23
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/tc358762.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/tc358764.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 62
-rw-r--r-- drivers/gpu/drm/bridge/tc358768.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/tc358775.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/thc63lvd1024.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/ti-dlpc3433.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 73
-rw-r--r-- drivers/gpu/drm/bridge/ti-tdp158.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/ti-tfp410.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/ti-tpd12s015.c | 16
-rw-r--r-- drivers/gpu/drm/ci/build-igt.sh | 2
-rw-r--r-- drivers/gpu/drm/ci/build.sh | 17
-rw-r--r-- drivers/gpu/drm/ci/build.yml | 10
-rw-r--r-- drivers/gpu/drm/ci/check-devicetrees.yml | 50
-rw-r--r-- drivers/gpu/drm/ci/container.yml | 30
-rwxr-xr-x drivers/gpu/drm/ci/dt-binding-check.sh | 19
-rwxr-xr-x drivers/gpu/drm/ci/dtbs-check.sh | 22
-rw-r--r-- drivers/gpu/drm/ci/gitlab-ci.yml | 56
-rwxr-xr-x drivers/gpu/drm/ci/igt_runner.sh | 1
-rw-r--r-- drivers/gpu/drm/ci/image-tags.yml | 20
-rwxr-xr-x drivers/gpu/drm/ci/kunit.sh | 16
-rw-r--r-- drivers/gpu/drm/ci/kunit.yml | 37
-rwxr-xr-x drivers/gpu/drm/ci/lava-submit.sh | 6
-rwxr-xr-x drivers/gpu/drm/ci/setup-llvm-links.sh | 13
-rw-r--r-- drivers/gpu/drm/ci/test.yml | 47
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt | 5
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt | 5
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt | 29
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt | 139
-rw-r--r-- drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt | 350
-rw-r--r-- drivers/gpu/drm/clients/drm_client_setup.c | 7
-rw-r--r-- drivers/gpu/drm/display/Kconfig | 13
-rw-r--r-- drivers/gpu/drm/display/Makefile | 4
-rw-r--r-- drivers/gpu/drm/display/drm_bridge_connector.c | 126
-rw-r--r-- drivers/gpu/drm/display/drm_dp_aux_bus.c | 1
-rw-r--r-- drivers/gpu/drm/display/drm_dp_cec.c | 1
-rw-r--r-- drivers/gpu/drm/display/drm_dp_helper.c | 137
-rw-r--r-- drivers/gpu/drm/display/drm_dp_mst_topology.c | 1
-rw-r--r-- drivers/gpu/drm/display/drm_dp_tunnel.c | 3
-rw-r--r-- drivers/gpu/drm/display/drm_dsc_helper.c | 1
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_audio_helper.c | 4
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_cec_helper.c | 193
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c | 65
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_helper.c | 3
-rw-r--r-- drivers/gpu/drm/display/drm_hdmi_state_helper.c | 129
-rw-r--r-- drivers/gpu/drm/display/drm_scdc_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 161
-rw-r--r-- drivers/gpu/drm/drm_atomic_state_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c | 1
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 65
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 49
-rw-r--r-- drivers/gpu/drm/drm_bridge_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_buddy.c | 1
-rw-r--r-- drivers/gpu/drm/drm_client.c | 36
-rw-r--r-- drivers/gpu/drm/drm_client_event.c | 1
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c | 2
-rw-r--r-- drivers/gpu/drm/drm_color_mgmt.c | 210
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 44
-rw-r--r-- drivers/gpu/drm/drm_damage_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 128
-rw-r--r-- drivers/gpu/drm/drm_debugfs_crc.c | 1
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 38
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 252
-rw-r--r-- drivers/gpu/drm/drm_exec.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_dma_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fbdev_dma.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fbdev_shmem.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fbdev_ttm.c | 1
-rw-r--r-- drivers/gpu/drm/drm_file.c | 18
-rw-r--r-- drivers/gpu/drm/drm_flip_work.c | 1
-rw-r--r-- drivers/gpu/drm/drm_format_helper.c | 313
-rw-r--r-- drivers/gpu/drm/drm_format_internal.h | 16
-rw-r--r-- drivers/gpu/drm/drm_fourcc.c | 45
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 27
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 47
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 48
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 61
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 80
-rw-r--r-- drivers/gpu/drm/drm_gpusvm.c | 797
-rw-r--r-- drivers/gpu/drm/drm_gpuvm.c | 133
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 10
-rw-r--r-- drivers/gpu/drm/drm_managed.c | 1
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 9
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c | 1
-rw-r--r-- drivers/gpu/drm/drm_modeset_helper.c | 6
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 2
-rw-r--r-- drivers/gpu/drm/drm_pagemap.c | 838
-rw-r--r-- drivers/gpu/drm/drm_panel.c | 52
-rw-r--r-- drivers/gpu/drm/drm_panel_backlight_quirks.c | 1
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 1
-rw-r--r-- drivers/gpu/drm/drm_panic.c | 1
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 1
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 1
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 58
-rw-r--r-- drivers/gpu/drm/drm_print.c | 1
-rw-r--r-- drivers/gpu/drm/drm_privacy_screen.c | 1
-rw-r--r-- drivers/gpu/drm/drm_self_refresh_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_suballoc.c | 2
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 1
-rw-r--r-- drivers/gpu/drm/drm_vblank_work.c | 2
-rw-r--r-- drivers/gpu/drm/drm_vma_manager.c | 1
-rw-r--r-- drivers/gpu/drm/drm_writeback.c | 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 17
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.h | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/gma500/fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 14
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 1
-rw-r--r-- drivers/gpu/drm/gud/gud_pipe.c | 9
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm.h | 4
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 206
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 12
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_dp.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/hsw_ips.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_display_sr.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c | 38
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_wm.c | 36
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi_regs.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_alpm.c | 72
-rw-r--r-- drivers/gpu/drm/i915/display/intel_alpm.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 239
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h | 53
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 204
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h | 50
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cmtg.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cmtg_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 73
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 41
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 101
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_de.h | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 196
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 26
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_conversion.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_core.h | 26
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_driver.c | 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_irq.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_params.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_map.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.c | 116
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_regs.h | 2932
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_reset.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rpm.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rps.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 46
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_wa.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dkl_phy.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c | 531
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.h | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_regs.h | 489
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_wl.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 79
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 142
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 94
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_test.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 203
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c | 197
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 539
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 80
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt_common.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_drrs.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 246
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_encoder.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 29
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_bo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_flipq.c | 472
-rw-r--r-- drivers/gpu/drm/i915/display/intel_flipq.h | 37
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 28
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hti_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_link_bw.c | 239
-rw-r--r-- drivers/gpu/drm/i915/display/intel_link_bw.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_verify.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 132
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c | 154
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pfit.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pipe_crc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.c (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.c) | 200
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.h (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.h) | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pmdemand.c | 43
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps_regs.h | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 110
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr_regs.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi.c | 90
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi.h | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi_regs.h | 65
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vblank.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vga.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr_regs.h | 121
-rw-r--r-- drivers/gpu/drm/i915/display/skl_scaler.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 39
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.c | 465
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.h | 33
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 32
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 46
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_sideband.c | 50
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_sideband.h | 156
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 142
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 31
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 62
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gvt/edid.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 40
-rw-r--r-- drivers/gpu/drm/i915/gvt/interrupt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c | 75
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 67
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 156
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 2977
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 20
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_clock_gating.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_pcode.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_pcode.h | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_sbi.c | 94
-rw-r--r-- drivers/gpu/drm/i915/intel_sbi.h | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.c | 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 14
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 12
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_dram.c | 92
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_dram.h | 26
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_gmch.c | 2
-rw-r--r-- drivers/gpu/drm/i915/vlv_iosf_sb.c (renamed from drivers/gpu/drm/i915/vlv_sideband.c) | 178
-rw-r--r-- drivers/gpu/drm/i915/vlv_iosf_sb.h | 37
-rw-r--r-- drivers/gpu/drm/i915/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/i915/vlv_sideband_reg.h) | 6
-rw-r--r-- drivers/gpu/drm/i915/vlv_sideband.h | 125
-rw-r--r-- drivers/gpu/drm/imagination/pvr_job.c | 2
-rw-r--r-- drivers/gpu/drm/imagination/pvr_power.c | 59
-rw-r--r-- drivers/gpu/drm/imagination/pvr_queue.c | 9
-rw-r--r-- drivers/gpu/drm/imagination/pvr_queue.h | 2
-rw-r--r-- drivers/gpu/drm/imx/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/imx/Makefile | 1
-rw-r--r-- drivers/gpu/drm/imx/dc/Kconfig | 13
-rw-r--r-- drivers/gpu/drm/imx/dc/Makefile | 7
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-cf.c | 172
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-crtc.c | 555
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-de.c | 177
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-de.h | 59
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-drv.c | 293
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-drv.h | 102
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-ed.c | 288
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fg.c | 376
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fl.c | 185
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fu.c | 258
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fu.h | 129
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fw.c | 222
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-ic.c | 282
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-kms.c | 143
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-kms.h | 131
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-lb.c | 325
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-pe.c | 158
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-pe.h | 101
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-plane.c | 224
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-tc.c | 141
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 5
-rw-r--r-- drivers/gpu/drm/lib/drm_random.c | 1
-rw-r--r-- drivers/gpu/drm/lima/lima_gem.c | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.c | 12
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.h | 3
-rw-r--r-- drivers/gpu/drm/lima/lima_trace.h | 6
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dp.c | 11
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dpi.c | 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 11
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_cvbs.c | 10
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_dsi.c | 10
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_hdmi.c | 10
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 8
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200er.c | 4
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200ev.c | 4
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200se.c | 4
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 82
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 36
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 26
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 25
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpummu.c | 5
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 42
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 10
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 48
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 32
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 187
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 8
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 12
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 43
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 104
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 69
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h | 49
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h | 494
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 17
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h | 11
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 39
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 44
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h | 29
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h | 19
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 16
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 38
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 16
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 40
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 22
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 46
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 43
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h | 49
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 43
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h | 43
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h | 48
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 61
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 22
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 20
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 71
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 103
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 145
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 21
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 21
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 224
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 21
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 11
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 11
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 65
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 25
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 15
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 41
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 36
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 12
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.c | 138
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.h | 15
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.c | 216
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.h | 15
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.c | 1298
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.h | 113
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 607
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.h | 19
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_debug.c | 5
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 156
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_drm.c | 3
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c | 1
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.c | 258
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.h | 13
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_reg.h | 19
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.c | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 14
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 75
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 79
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 9
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.h | 10
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 8
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 2
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_hpd.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_debugfs.c | 97
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 371
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 64
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 51
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 536
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 296
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 65
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_shrinker.c | 104
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 318
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_vma.c | 1514
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 208
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h | 144
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_trace.h | 14
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 302
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.c | 59
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.h | 48
-rw-r--r-- drivers/gpu/drm/msm/msm_mdss.c | 331
-rw-r--r-- drivers/gpu/drm/msm/msm_mdss.h | 28
-rw-r--r-- drivers/gpu/drm/msm/msm_mmu.h | 38
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 62
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 10
-rw-r--r-- drivers/gpu/drm/msm/msm_submitqueue.c | 96
-rw-r--r-- drivers/gpu/drm/msm/msm_syncobj.c | 172
-rw-r--r-- drivers/gpu/drm/msm/msm_syncobj.h | 37
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx.xml | 3582
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml | 198
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml | 383
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml | 600
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml | 223
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml | 1030
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml | 302
-rw-r--r-- drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml | 14
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_backlight.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_exec.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 107
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sched.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sched.h | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dispc.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dpi.c | 7
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dsi.c | 7
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 26
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 26
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/sdi.c | 25
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/venc.c | 23
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.h | 3
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 36
-rw-r--r-- drivers/gpu/drm/panel/Makefile | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-boe-himax8279d.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-elida-kd35t133.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-himax-hx83102.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-himax-hx83112a.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-himax-hx83112b.c | 430
-rw-r--r-- drivers/gpu/drm/panel/panel-himax-hx8394.c | 153
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9322.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9805.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9806e.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 236
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9882t.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-ej030na.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-p079zca.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-khadas-ts050.c | 13
-rw-r--r-- drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-lb035q02.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-lg4573.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-sw43408.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-lincolntech-lcd197.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-lvds.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-newvision-nv3051d.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-newvision-nv3052c.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt35510.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt35560.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt35950.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt36523.c | 9
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt36672a.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt36672e.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt39016.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-orisetech-ota5601a.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-orisetech-otm8009a.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-raydium-rm67191.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-raydium-rm67200.c | 50
-rw-r--r-- drivers/gpu/drm/panel/panel-raydium-rm68200.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-raydium-rm692e5.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-raydium-rm69380.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-renesas-r61307.c | 325
-rw-r--r-- drivers/gpu/drm/panel/panel-renesas-r69328.c | 281
-rw-r--r-- drivers/gpu/drm/panel/panel-ronbo-rb070d30.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-ams581vf01.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-ams639rq08.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-atna33xc20.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-db7430.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-ld9040.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6d16d0.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6d27a1.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e63m0.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c | 12
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-sofef00.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 27
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7701.c | 132
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7703.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-sony-acx565akm.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-summit.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-synaptics-r63353.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-tpg110.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-r66451.c | 9
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-rm69299.c | 257
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-rm692e5.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-vtdr6130.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-widechips-ws2401.c | 11
-rw-r--r-- drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c | 10
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c | 4
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.c | 5
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.h | 15
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 142
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.c | 186
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.h | 66
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 10
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 2
-rw-r--r-- drivers/gpu/drm/panthor/panthor_device.h | 89
-rw-r--r-- drivers/gpu/drm/panthor/panthor_drv.c | 63
-rw-r--r-- drivers/gpu/drm/panthor/panthor_fw.c | 9
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gem.c | 31
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gem.h | 3
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gpu.c | 160
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gpu.h | 12
-rw-r--r-- drivers/gpu/drm/panthor/panthor_mmu.c | 49
-rw-r--r-- drivers/gpu/drm/panthor/panthor_mmu.h | 1
-rw-r--r-- drivers/gpu/drm/panthor/panthor_regs.h | 100
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.c | 7
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.h | 3
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_cs.c | 98
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fbdev.c | 3
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c | 3
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h | 2
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c | 8
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c | 8
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c | 11
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c | 44
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c | 3
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c | 355
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h | 56
-rw-r--r-- drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c | 3
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 293
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.h | 8
-rw-r--r-- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 16
-rw-r--r-- drivers/gpu/drm/rockchip/inno_hdmi.c | 452
-rw-r--r-- drivers/gpu/drm/rockchip/inno_hdmi.h | 349
-rw-r--r-- drivers/gpu/drm/rockchip/rk3066_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 20
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 29
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 33
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.c | 68
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 89
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h103
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c26
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c203
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c109
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h3
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c93
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig10
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c95
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c29
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c27
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c26
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/stm/lvds.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c168
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c6
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h4
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c138
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c6
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c85
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c5
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c106
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/fb.c7
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tests/Makefile3
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c176
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c266
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c651
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h374
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c1
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/Makefile3
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c287
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h20
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h29
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h5
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.c598
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.h43
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/tiny/bochs.c19
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c60
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h58
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c232
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c24
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c18
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c3
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c30
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c6
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile6
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c280
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c28
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h39
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c467
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h9
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c510
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/xe/Kconfig14
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug11
-rw-r--r--drivers/gpu/drm/xe/Makefile20
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h35
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h14
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h28
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h)2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c91
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c7
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c101
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c61
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c13
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c232
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c52
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c90
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c28
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c121
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h20
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c4
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c160
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h11
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c62
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c12
-rw-r--r--drivers/gpu/drm/xe/xe_device.c73
-rw-r--r--drivers/gpu/drm/xe/xe_device.h55
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c143
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules2
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c6
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c45
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c253
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h24
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c266
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c96
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c47
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c106
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c131
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c168
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c318
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c58
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c166
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c34
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c394
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c64
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c5
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c40
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c19
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c230
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c329
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h62
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c9
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c60
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c316
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_map.h18
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c30
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c24
-rw-r--r--drivers/gpu/drm/xe/xe_module.c40
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c167
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c224
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c44
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c78
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c30
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c9
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c7
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c135
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c147
-rw-r--r--drivers/gpu/drm/xe/xe_query.c33
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c47
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h14
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c17
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c45
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c61
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c163
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h41
-rw-r--r--drivers/gpu/drm/xe/xe_step.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c19
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c403
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h136
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c13
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c254
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h18
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h4
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c5
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c78
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c58
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c385
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h24
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c120
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h22
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules12
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c3
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c34
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c3
1347 files changed, 52203 insertions, 29176 deletions
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
index fa6ee76f4d3c..05dc43c0b8c5 100644
--- a/drivers/gpu/drm/Kconfig.debug
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -70,6 +70,7 @@ config DRM_KUNIT_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
select PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5050ac32bba2..4dafbdc8f86a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -104,7 +104,11 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
-obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
+
+drm_gpusvm_helper-y := \
+ drm_gpusvm.o\
+ drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index 2b60128e2c69..cba7d32150a9 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -229,9 +229,10 @@ static int adp_mipi_probe(struct platform_device *pdev)
{
struct adp_mipi_drv_private *adp;
- adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
- if (!adp)
- return -ENOMEM;
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
adp->mipi = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adp->mipi)) {
@@ -241,7 +242,6 @@ static int adp_mipi_probe(struct platform_device *pdev)
adp->dsi.dev = &pdev->dev;
adp->dsi.ops = &adp_dsi_host_ops;
- adp->bridge.funcs = &adp_dsi_bridge_funcs;
adp->bridge.of_node = pdev->dev.of_node;
adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(&pdev->dev, adp);
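
The hunk above follows the devm_drm_bridge_alloc() conversion pattern: the driver struct is allocated with its embedded bridge already bound to the funcs table, replacing a devm_kzalloc() plus a manual funcs assignment, and failure is reported as an ERR_PTR rather than NULL. A minimal sketch of the pattern for a hypothetical driver (the foo_* names are illustrative, not from this series):

	#include <drm/drm_bridge.h>
	#include <linux/platform_device.h>

	struct foo_bridge {
		struct drm_bridge bridge;
		void __iomem *regs;
	};

	static const struct drm_bridge_funcs foo_bridge_funcs;	/* assumed defined elsewhere */

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_bridge *ctx;

		/* Allocates foo_bridge, initializes the embedded bridge and
		 * binds its funcs; returns an ERR_PTR on failure, so the old
		 * NULL check becomes IS_ERR()/PTR_ERR(). */
		ctx = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge,
					    bridge, &foo_bridge_funcs);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		ctx->bridge.of_node = pdev->dev.of_node;
		drm_bridge_add(&ctx->bridge);
		return 0;
	}
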
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 87080c06e5fc..930de203d533 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..9569dc16dd3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,29 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
return NULL;
}
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
int r, i;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!(adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_GFX ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_SDMA))
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
continue;
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
struct amdgpu_firmware_info *ucode;
struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
int ucode_count = 0;
int i, r;
@@ -243,6 +256,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (r)
return r;
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
/* Reinit GFXHUB */
adev->gfxhub.funcs->init(adev);
r = adev->gfxhub.funcs->gart_enable(adev);
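
The aldebaran change replaces chained type comparisons with a bitmask membership test, so the GFX/SDMA/IH selection lives in one helper and IH is only added on multi-AID parts (non-zero aid_mask). As a standalone illustration of the mask logic used above (userspace sketch, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))

	enum ip_type { IP_GFX, IP_SDMA, IP_IH, IP_VCN, IP_NUM };

	/* Mirrors aldebaran_get_ip_block_mask(): GFX and SDMA always,
	 * IH only when the device is multi-AID. */
	static uint32_t get_mask(int multi_aid)
	{
		uint32_t mask = BIT(IP_GFX) | BIT(IP_SDMA);

		if (multi_aid)
			mask |= BIT(IP_IH);
		return mask;
	}

	int main(void)
	{
		uint32_t mask = get_mask(1);
		int t;

		for (t = 0; t < IP_NUM; t++)
			printf("block %d: %s\n", t,
			       (mask & BIT(t)) ? "suspend" : "skip");
		return 0;
	}
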
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a5ccd0ada16a..a1737556a77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -470,9 +470,6 @@ struct amdgpu_sa_manager {
void *cpu_ptr;
};
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
/*
* IRQS.
*/
@@ -1282,6 +1279,7 @@ struct amdgpu_device {
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
@@ -1336,6 +1334,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1387,7 +1390,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1558,16 +1562,16 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
int amdgpu_device_link_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -1619,6 +1623,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1669,7 +1674,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size);
@@ -1700,8 +1706,11 @@ static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
- enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
@@ -1714,7 +1723,7 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
#endif
#if defined(CONFIG_DRM_AMD_ISP)
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1760,4 +1769,19 @@ extern const struct attribute_group amdgpu_flash_attr_group;
void amdgpu_set_init_level(struct amdgpu_device *adev,
enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
#endif
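
The new amdgpu_device_bus_status_check() helper above gives callers a cheap liveness probe: a config-space read of PCI_COMMAND that fails or returns all ones (PCI_POSSIBLE_ERROR) means the device has dropped off the bus. A hedged sketch of how a recovery path might use it; the caller shown is hypothetical, only the helper itself comes from this series:

	/* Hypothetical reset-path caller; error handling trimmed. */
	static int foo_pre_reset(struct amdgpu_device *adev)
	{
		int r;

		/* Bail out before touching MMIO if config reads fail. */
		r = amdgpu_device_bus_status_check(adev);
		if (r)
			return r;	/* -ENODEV: device lost from bus */

		/* ... safe to proceed with register access, ring teardown ... */
		return 0;
	}
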
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 3835f2592914..cbc40cad581b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -115,6 +115,11 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
int i;
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f5466c592d94..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -811,18 +811,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
* @ss_state: current smart shift event
*
* returns 0 on success,
* otherwise return error number.
*/
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (!amdgpu_device_supports_smart_shift(dev))
+ if (!amdgpu_device_supports_smart_shift(adev))
return 0;
switch (ss_state) {
@@ -1545,7 +1545,7 @@ static int isp_match_acpi_device_ids(struct device *dev, const void *data)
return acpi_match_device(data, dev) ? 1 : 0;
}
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
{
struct device *pdev __free(put_device) = NULL;
struct acpi_device *acpi_pdev;
@@ -1559,7 +1559,7 @@ int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
if (!acpi_pdev)
return -ENODEV;
- strscpy(*hid, acpi_device_hid(acpi_pdev));
+ *dev = acpi_pdev;
return 0;
}
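
With this rework, amdgpu_acpi_get_isp4_dev() hands back the matched acpi_device itself instead of copying out its HID string, so callers keep access to the full ACPI node. A sketch of a consumer under the new signature (the foo_* caller is illustrative):

	#include <linux/acpi.h>

	static int foo_bind_isp(struct device *dev)
	{
		struct acpi_device *isp_adev;
		int r;

		r = amdgpu_acpi_get_isp4_dev(&isp_adev);
		if (r)
			return r;	/* -ENODEV if nothing matched */

		/* The old HID string is still one call away. */
		dev_info(dev, "ISP4 device: %s\n", acpi_device_hid(isp_adev));
		return 0;
	}
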
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d8ac4b1051a8..fbe7616555c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,34 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+ if (adev->kfd.dev)
+ kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_process(adev->kfd.dev);
return r;
}
@@ -642,7 +658,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
@@ -749,12 +765,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
- return kgd2kfd_check_and_lock_kfd();
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
}
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
- kgd2kfd_unlock_kfd();
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
}
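
Here the old run_pm flag becomes suspend_proc/resume_proc, and process-only eviction gets its own entry points (amdgpu_amdkfd_suspend_process/resume_process) separate from device-level suspend. A sketch of the two scopes under the new API; the wrapper is hypothetical and only illustrates which call targets what:

	/* Hypothetical helper contrasting the two scopes of the split API. */
	static void foo_quiesce_compute(struct amdgpu_device *adev, bool whole_device)
	{
		if (whole_device) {
			/* Device-level path: tear down KFD and, with
			 * suspend_proc = true, its processes as well. */
			amdgpu_amdkfd_suspend(adev, true);
			return;
		}
		/* Process-only path: evict compute processes while the
		 * KFD device itself stays initialized. */
		amdgpu_amdkfd_suspend_process(adev);
	}
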
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6ca41859b53..33eb4826b58b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -154,8 +154,10 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,16 +413,18 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
@@ -454,11 +458,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
}
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
return 0;
}
@@ -489,12 +502,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
return 0;
}
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
if (!down_read_trylock(&adev->reset_domain->sem))
return;
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
if (suspend_resume_compute_scheduler(adev, true))
goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
out:
suspend_resume_compute_scheduler(adev, false);
- amdgpu_amdkfd_resume(adev, false);
+ amdgpu_amdkfd_resume(adev, true);
up_read(&adev->reset_domain->sem);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 5a234eadae8b..15dde1f50328 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -212,7 +212,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
- CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
section->hdr.valid_bits.err_info_cnt = 1;
@@ -326,7 +326,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
return -ENOMEM;
}
- amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ea0d9b71f48..a2adaacf6adb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -293,7 +293,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f81608330a3d..0e6e2e2acf5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
@@ -2131,6 +2131,55 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
@@ -2140,4 +2189,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 7b50741dc097..8a026bc9ea44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -220,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
coredump->reset_time.tv_nsec);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index aa32df7e2fb2..6f93473436be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -232,7 +232,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
int ret = 0;
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
ret = sysfs_create_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
@@ -241,7 +241,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
sysfs_remove_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
}
@@ -411,19 +411,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
@@ -432,15 +429,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
@@ -453,29 +448,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Return:
* 1 if the device supports BACO;
* 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
- struct drm_device *dev;
int bamaco_support;
- dev = adev_to_drm(adev);
-
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- bamaco_support = amdgpu_device_supports_baco(dev);
+ bamaco_support = amdgpu_device_supports_baco(adev);
switch (amdgpu_runtime_pm) {
case 2:
@@ -495,10 +485,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case -1:
case -2:
- if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
@@ -547,14 +539,14 @@ no_runtime_pm:
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
- return (amdgpu_device_supports_boco(dev) &&
+ return (amdgpu_device_supports_boco(adev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
@@ -1288,14 +1280,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1312,15 +1304,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
BUG();
}
static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
BUG();
}
@@ -1336,14 +1330,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
BUG();
return 0;
}
static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1360,15 +1355,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
BUG();
}
static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
@@ -1386,8 +1383,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -1407,8 +1405,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
@@ -1694,7 +1693,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space, please check!!\n");
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -1734,9 +1735,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
r = pci_resize_resource(adev->pdev, 0, rbar_size);
if (r == -ENOSPC)
- DRM_INFO("Not enough PCI address space for a large BAR.");
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
- DRM_ERROR("Problem resizing BAR0 (%d).", r);
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
pci_assign_unassigned_bus_resources(adev->pdev->bus);
@@ -1838,8 +1840,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
case 0:
return false;
default:
- DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
- amdgpu_seamless);
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
return false;
}
@@ -2015,7 +2017,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
if (!is_os_64) {
- DRM_WARN("Not 64-bit OS, feature not supported\n");
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
@@ -2030,7 +2032,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
- DRM_WARN("Smu memory pool size not supported\n");
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -2038,7 +2040,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
def_value1:
- DRM_WARN("No enough system memory\n");
+ dev_warn(adev->dev, "No enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
@@ -2190,7 +2192,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2202,12 +2205,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
- DRM_WARN("pci_enable_device failed (%d)\n", r);
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_prepare(dev);
amdgpu_device_suspend(dev, true);
@@ -2274,8 +2278,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2308,8 +2313,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2525,9 +2531,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -2538,8 +2546,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- DRM_INFO("virtual_display:%d, num_crtc:%d\n",
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
}
}
@@ -2773,21 +2782,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ adev->virt.is_xgmi_node_migrate_enabled = false;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+ }
+
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
ip_block = &adev->ip_blocks[i];
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_WARN("disabled ip block: %d <%s>\n",
- i, adev->ip_blocks[i].version->funcs->name);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else if (ip_block->version->funcs->early_init) {
r = ip_block->version->funcs->early_init(ip_block);
if (r == -ENOENT) {
adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
total = false;
} else {
adev->ip_blocks[i].status.valid = true;
@@ -2868,8 +2885,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2893,8 +2912,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2932,8 +2952,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
} else {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2988,25 +3011,29 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
}
- amdgpu_xcp_update_partition_sched_list(adev);
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
@@ -3038,8 +3065,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->sw_init) {
r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
goto init_failed;
}
}
@@ -3053,7 +3082,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do common hw init early so everything is set up for gmc */
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3065,17 +3095,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3087,14 +3121,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_CSA_SIZE);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
goto init_failed;
}
}
r = amdgpu_seq64_init(adev);
if (r) {
- DRM_ERROR("allocate seq64 failed %d\n", r);
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
goto init_failed;
}
}
@@ -3284,8 +3320,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3321,8 +3359,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3388,8 +3428,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3398,7 +3440,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_ras_late_init(adev);
if (r) {
- DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
return r;
}
@@ -3412,7 +3454,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
if (amdgpu_passthrough(adev) &&
@@ -3445,7 +3487,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
- DRM_ERROR("pstate setting failed (%d).\n", r);
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
break;
}
}
@@ -3459,17 +3503,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
if (!ip_block->version->funcs->hw_fini) {
- DRM_ERROR("hw_fini of IP block <%s> not defined\n",
- ip_block->version->funcs->name);
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
} else {
r = ip_block->version->funcs->hw_fini(ip_block);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- ip_block->version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
}
}
@@ -3510,15 +3556,16 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
- DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
@@ -3533,7 +3580,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
}
return 0;
@@ -3581,8 +3629,10 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
}
}
adev->ip_blocks[i].status.sw = false;
@@ -3615,7 +3665,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3756,8 +3806,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
return r;
}
}
@@ -4041,12 +4092,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
/**
* amdgpu_device_asic_has_dc_support - determine if DC supports the asic
*
+ * @pdev: pci device context
* @asic_type: AMD asic type
*
 * Check if there is DC (new modesetting infrastructure) support for an asic.
* returns true if DC has support, false if not.
*/
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -4089,7 +4142,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ dev_info_once(
+ &pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -4108,7 +4163,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
- return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
}
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -4130,13 +4185,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
if (adev->asic_reset_res)
goto fail;
@@ -4150,7 +4205,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev_to_drm(adev)->unique);
amdgpu_put_xgmi_hive(hive);
}
@@ -4164,18 +4220,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
int ret = 0;
/*
- * By default timeout for non compute jobs is 10000
- * and 60000 for compute jobs.
- * In SR-IOV or passthrough mode, timeout for compute
- * jobs are 60000 by default.
+ * By default the timeout for all jobs is 10 seconds.
*/
- adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev))
- adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else
- adev->compute_timeout = msecs_to_jiffies(60000);
if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
while ((timeout_setting = strsep(&input, ",")) &&
@@ -4274,7 +4322,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
if (adev->gfx.mcbp)
- DRM_INFO("MCBP is enabled\n");
+ dev_info(adev->dev, "MCBP is enabled\n");
}
/**
@@ -4290,7 +4338,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
- struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool px = false;
@@ -4342,9 +4389,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(
+ adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues
@@ -4461,8 +4510,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!adev->rmmio)
return -ENOMEM;
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
@@ -4599,7 +4650,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
+ dev_info(adev->dev, "GPU posting now...\n");
r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
@@ -4709,12 +4760,12 @@ fence_driver_init:
r = amdgpu_pm_sysfs_init(adev);
if (r)
- DRM_ERROR("registering pm sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
adev->ucode_sysfs_en = false;
- DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
} else
adev->ucode_sysfs_en = true;
@@ -4747,7 +4798,7 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- px = amdgpu_device_supports_px(ddev);
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4913,7 +4964,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->xcp_mgr);
adev->xcp_mgr = NULL;
- px = amdgpu_device_supports_px(adev_to_drm(adev));
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4962,8 +5013,16 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- if (ret)
- DRM_WARN("evicting device resources failed\n");
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
return ret;
}
@@ -5035,6 +5094,28 @@ int amdgpu_device_prepare(struct drm_device *dev)
}
/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+ }
+}
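
An IP block opts into the new callback by setting .complete in its amd_ip_funcs table. The sketch below is hypothetical (the block name and body are invented); a void signature is assumed, since the call site above ignores any return value:

    /* Hypothetical IP block wiring up the new .complete hook. */
    static void my_ip_complete(struct amdgpu_ip_block *ip_block)
    {
            /* Undo whatever the block's prepare stage set up. This runs
             * on every resume transition, including failed ones.
             */
    }

    static const struct amd_ip_funcs my_ip_funcs = {
            .name = "my_ip",
            .complete = my_ip_complete,
            /* ... other callbacks ... */
    };
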
+
+/**
* amdgpu_device_suspend - initiate device suspend
*
* @dev: drm dev pointer
@@ -5055,14 +5136,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_s0ix && !adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
return r;
}
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
+ dev_warn(adev->dev, "smart shift update failed\n");
if (notify_clients)
drm_client_dev_suspend(adev_to_drm(adev), false);
@@ -5074,7 +5157,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
amdgpu_userq_suspend(adev);
}
@@ -5098,6 +5181,32 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
return 0;
}
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU's programming of the VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+ * may not work. The access could be blocked by nBIF protection as the VF isn't in
+ * exclusive access mode. Exclusive access is enabled now; disable/enable MSIX
+ * so that QEMU reprograms the MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
+ return 0;
+}
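
The offset arithmetic at the end of amdgpu_virt_resume() is easiest to check with concrete numbers. Assuming, purely for illustration, 64 GB XGMI node segments and a VF that migrated to physical node 2:

    u64 node_segment_size = 64ULL << 30;    /* 64 GB per node */
    u32 physical_node_id = 2;               /* node id after migration */
    u64 fb_offset = 0;                      /* assume get_mc_fb_offset() returns 0 */

    /* vram_base_offset must point at this node's slice of hive memory: */
    u64 vram_base_offset = fb_offset +
            physical_node_id * node_segment_size;   /* 0 + 2 * 64 GB = 128 GB */
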
+
/**
* amdgpu_device_resume - initiate device resume
*
@@ -5119,6 +5228,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
return r;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
+ if (r)
+ goto exit;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5140,7 +5255,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
}
if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
if (r)
goto exit;
@@ -5159,6 +5274,9 @@ exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!adev->in_s0ix && !r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
if (r)
@@ -5197,8 +5315,8 @@ exit:
amdgpu_vram_mgr_clear_reset_blocks(adev);
adev->in_suspend = false;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
return 0;
}
@@ -5729,7 +5847,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
- DRM_INFO("VRAM is lost due to GPU reset!\n");
+ dev_info(
+ tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
}
@@ -6008,14 +6128,9 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
struct amdgpu_device *tmp_adev;
int ret = 0;
- u32 status;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
- if (PCI_POSSIBLE_ERROR(status)) {
- dev_err(tmp_adev->dev, "device lost from bus!");
- ret = -ENODEV;
- }
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
}
return ret;
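
amdgpu_device_bus_status_check() itself is not shown in this hunk. Judging from the code it replaces, a minimal version might look like the sketch below; the real helper may well check more than PCI_COMMAND:

    /* Sketch reconstructed from the removed lines, not the actual helper. */
    static int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
    {
            u32 status;

            pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
            if (PCI_POSSIBLE_ERROR(status)) {
                    dev_err(adev->dev, "device lost from bus!");
                    return -ENODEV;
            }
            return 0;
    }

Because the caller ORs the return values together, a single dead device fails the health check for the whole reset list.
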
@@ -6080,14 +6195,15 @@ static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
}
-static int amdgpu_device_halt_activities(
- struct amdgpu_device *adev, struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context,
- struct list_head *device_list, struct amdgpu_hive_info *hive,
- bool need_emergency_restart)
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
{
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
+ int i;
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list, reset_list) {
@@ -6139,8 +6255,6 @@ static int amdgpu_device_halt_activities(
}
atomic_inc(&tmp_adev->gpu_reset_counter);
}
-
- return r;
}
static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
@@ -6245,8 +6359,10 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
- if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
@@ -6327,7 +6443,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
+ dev_warn(adev->dev, "Emergency reboot.");
ksys_sync_helper();
emergency_restart();
@@ -6351,11 +6467,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
- r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
- hive, need_emergency_restart);
- if (r)
- goto reset_unlock;
-
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
if (need_emergency_restart)
goto skip_sched_resume;
/*
@@ -6392,8 +6505,17 @@ end_reset:
atomic_set(&adev->reset_domain->reset_res, r);
- if (!r)
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
return r;
}
@@ -6712,12 +6834,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
#endif
}
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -6727,13 +6848,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
return amdgpu_dpm_baco_enter(adev);
}
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -6767,7 +6887,6 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
- int r = 0;
dev_info(adev->dev, "PCI error: detected callback!!\n");
@@ -6794,14 +6913,12 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_device_recovery_prepare(adev, &device_list, hive);
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
- r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
- hive, false);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
if (hive) {
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
}
- if (r)
- return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -6983,11 +7100,11 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
adev->pci_state = pci_store_saved_state(pdev);
if (!adev->pci_state) {
- DRM_ERROR("Failed to store PCI saved state");
+ dev_err(adev->dev, "Failed to store PCI saved state");
return false;
}
} else {
- DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
return false;
}
@@ -7008,7 +7125,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
if (!r) {
pci_restore_state(pdev);
} else {
- DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
return false;
}
@@ -7254,7 +7371,7 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
if (r)
- DRM_WARN("OOM tracking isolation\n");
+ dev_warn(adev->dev, "OOM tracking isolation\n");
out_grab_ref:
dma_fence_get(dep);
@@ -7322,9 +7439,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
tmp_ = RREG32(reg_addr);
loop--;
if (!loop) {
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
- inst, reg_name, (uint32_t)expected_value,
- (uint32_t)(tmp_ & (mask)));
+ dev_warn(
+ adev->dev,
+ "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
ret = -ETIMEDOUT;
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 35c778426a7c..51bab32fd8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1196,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1297,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
@@ -1317,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
- if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
@@ -1330,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
- mode_cmd, obj);
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dfa0d642ac16..930c171473b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 44e120f9f764..ff98c87b2e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -513,8 +513,8 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
if (!adev)
return false;
- if (obj->import_attach) {
- struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dma_buf = obj->dma_buf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* No XGMI with non AMD GPUs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index 3f3662e8b871..3040437d99c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
- DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4db92e0a60da..395c6be901ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -144,6 +144,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -361,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* The second one is for Compute. The third and fourth ones are
* for SDMA and Video.
*
- * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
- * jobs is 10000. The timeout for compute is 60000.
+ * By default (with no lockup_timeout settings), the timeout for all jobs is 10000.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 10000 for all jobs. "
+ "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
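
A user who wants the old 60 second compute timeout back can set it explicitly. For example (values illustrative): on bare metal, amdgpu.lockup_timeout=10000,60000,10000,10000 keeps GFX, SDMA and Video at 10 seconds while giving compute jobs 60 seconds, and the single-value form amdgpu.lockup_timeout=30000 sets the [Non-Compute] timeout (bare metal) or the [all jobs] timeout (SR-IOV/passthrough) to 30 seconds.
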
/**
@@ -2278,6 +2279,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: VM mode debug for userptr is enabled\n");
adev->debug_vm_userptr = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
}
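
Since AMDGPU_DEBUG_DISABLE_RAS_CE_LOG is BIT(9), i.e. 0x200, it can be selected at load time with amdgpu.debug_mask=0x200; bits combine as usual, so for example 0x280 also enables AMDGPU_DEBUG_SMU_POOL (BIT(7) = 0x80).
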
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2321,7 +2327,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
amdgpu_aspm = 0;
if (amdgpu_virtual_display ||
- amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
@@ -2451,10 +2457,10 @@ retry_init:
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_px(ddev))
+ if (amdgpu_device_supports_px(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(ddev))
+ if (amdgpu_device_supports_boco(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
@@ -2487,9 +2493,9 @@ retry_init:
* into D0 state. Then there will be a PMFW-aware D-state
* transition(D0->D3) on runpm suspend.
*/
- if (amdgpu_device_supports_baco(ddev) &&
+ if (amdgpu_device_supports_baco(adev) &&
!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
+ adev->asic_type >= CHIP_NAVI10)
amdgpu_get_secondary_funcs(adev);
}
@@ -2506,6 +2512,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
@@ -2535,6 +2542,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
if (amdgpu_ras_intr_triggered())
return;
+ /* the device may not be resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -2551,11 +2562,14 @@ static int amdgpu_pmops_prepare(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ /* the device may not be resumed here; return immediately in that case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
- if (amdgpu_device_supports_boco(drm_dev) &&
- pm_runtime_suspended(dev))
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
return 1;
/* if we will not support s3 or s2i for the device
@@ -2570,7 +2584,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
static void amdgpu_pmops_complete(struct device *dev)
{
- /* nothing to do */
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -2650,12 +2664,21 @@ static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ /* do not resume the device during a normal hibernation */
+ if (!pm_hibernate_is_recovering())
+ return 0;
+
return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2828,7 +2851,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* nothing to do */
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_enter(drm_dev);
+ amdgpu_device_baco_enter(adev);
}
dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
@@ -2869,7 +2892,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_exit(drm_dev);
+ amdgpu_device_baco_exit(adev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
@@ -3107,10 +3130,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
- if (r)
- goto error_fence;
-
r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
@@ -3145,7 +3164,6 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
index 8b919ad3af29..23d7d0b0d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -143,7 +143,6 @@ static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_eviction_fence_get_driver_name,
.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
.enable_signaling = amdgpu_eviction_fence_enable_signaling,
@@ -169,9 +168,9 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
ev_fence->evf_mgr = evf_mgr;
get_task_comm(ev_fence->timeline_name, current);
spin_lock_init(&ev_fence->lock);
- dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
- &ev_fence->lock, evf_mgr->ev_fence_ctx,
- atomic_inc_return(&evf_mgr->ev_fence_seq));
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
return ev_fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5fec808d7f54..9e7506965cab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -41,21 +41,6 @@
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
/*
* Cast helper
*/
@@ -114,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
- * @job: job the fence is embedded in
+ * @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
- unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
@@ -130,40 +115,35 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
uint32_t seq;
int r;
- if (job == NULL) {
- /* create a sperate hw fence */
- am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
- if (am_fence == NULL)
+ if (!af) {
+ /* create a separate hw fence */
+ am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
+ if (!am_fence)
return -ENOMEM;
+ am_fence->context = 0;
} else {
- /* take use of job-embedded fence */
- am_fence = &job->hw_fence;
+ am_fence = af;
}
fence = &am_fence->base;
am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- if (job && job->job_run_counter) {
- /* reinit seq for resubmitted jobs */
- fence->seqno = seq;
- /* TO be inline with external fence creation and other drivers */
+ am_fence->seq = seq;
+ if (af) {
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ /* Protect against removal in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
} else {
- if (job) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(fence);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -276,6 +256,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -288,6 +269,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -310,7 +297,9 @@ static void amdgpu_fence_fallback(struct timer_list *t)
fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
@@ -748,6 +737,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}
+
+/**
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will reset
+ * lose all of that state. In order to minimize the collateral damage, the
+ * driver will save the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue the queue
+ * contents from the other contexts is re-emitted to the rings so that it can
+ * be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
+ */
+
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
+ *
+ * @fence: fence of the ring to signal
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+{
+ dma_fence_set_error(&fence->base, -ETIME);
+ amdgpu_fence_write(fence->ring, fence->seq);
+ amdgpu_fence_process(fence->ring);
+}
+
+void amdgpu_fence_save_wptr(struct dma_fence *fence)
+{
+ struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
+
+ am_fence->wptr = am_fence->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr, i, seqno;
+
+ seqno = amdgpu_fence_read(ring);
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
+ ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
+ }
+}
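
The masked loop in amdgpu_ring_backup_unprocessed_command() handles ring wraparound implicitly. A worked example, with illustrative numbers only:

    /* Assume a 1024-entry ring buffer, so ring->buf_mask == 0x3ff. */
    u64 start_wptr = 1020;  /* wptr saved by the previously signalled fence */
    u32 end_wptr = 1030;    /* wptr saved by the unprocessed fence */

    unsigned int first_idx = start_wptr & 0x3ff;    /* 1020 */
    unsigned int last_idx = end_wptr & 0x3ff;       /* 1030 & 0x3ff == 6 */

    /* for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
     * visits 1020, 1021, 1022, 1023, 0, 1, ..., 5: ten dwords are copied
     * into ring_backup, wrapping at the end of the buffer.
     */
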
+
/*
* Common fence implementation
*/
@@ -814,7 +883,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free fence_slab if it's separated fence*/
- kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+ kfree(to_amdgpu_fence(f));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 1ae88c459da5..b0082aa7f3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
- DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
- DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
- DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
return -EIO;
}
@@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
- DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
return -EIO;
}
@@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
- DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
- DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
kfree(pia);
return -EIO;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0ecc88df7208..6626a6e64ff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -317,8 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!obj->import_attach ||
- !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+ if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
if (!WARN_ON(!vm->process_info->eviction_fence)) {
@@ -329,7 +328,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -1024,7 +1023,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->tbo.base.import_attach &&
+ if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c5646af055ab..c80c8f543532 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -149,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
- DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
@@ -674,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}
@@ -683,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
- kiq_ring->queue);
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -712,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
@@ -734,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
- DRM_ERROR("failed to map gfx queue\n");
+ dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
@@ -748,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -769,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KGQ enable failed\n");
+ dev_err(adev->dev, "KGQ enable failed\n");
return r;
}
@@ -1030,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
ih_data.head = *ras_if;
- DRM_ERROR("CP ECC ERROR IRQ\n");
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6b0fbbb91e57..97b562a79ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -38,6 +38,13 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+static const u64 four_gb = 0x100000000ULL;
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
- mc->gart_start = hive_vram_end + 1;
+ /* node_segment_size may not be 4GB-aligned on SR-IOV, so align up. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- mc->fb_start = hive_vram_start;
- mc->fb_end = hive_vram_end;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
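
The new alignment matters whenever node_segment_size is not a multiple of 4 GB. With illustrative numbers, three nodes of 6 GB each:

    u64 hive_vram_end = (18ULL << 30) - 1;          /* hive ends at 18 GB */
    u64 gart_start = ALIGN(hive_vram_end + 1, four_gb);
    /* 0x480000000 rounds up to 0x500000000, so GART starts at 20 GB,
     * keeping the same 4 GB alignment the non-XGMI GART placement uses.
     */
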
@@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
enum amdgpu_gart_placement gart_placement)
{
- const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
@@ -1041,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
*/
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
- u64 vram_addr = adev->vm_manager.vram_base_offset -
- adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- u64 vram_end = vram_addr + vram_size;
+ u64 vram_addr, vram_end;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
@@ -1056,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
/* The first n PDE0 entries are used as PTE,
* pointing to vram
*/
@@ -1429,3 +1448,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware is not in the supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fallback to sw based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
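
When no NPS range info is available, the software fallback is plain page arithmetic. A worked example with illustrative numbers, 64 GB of VRAM split NPS4-style on a dGPU:

    u64 real_vram_size = 64ULL << 30;
    u32 size = (real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
    size /= 4;      /* roughly 4M GPU pages (about 16 GB) per range */

    /* mem_ranges[0]: fpfn = 0,    lpfn = size - 1
     * mem_ranges[1]: fpfn = size, lpfn = 2 * size - 1
     * ...
     * The last range's lpfn and size are then clamped so the table
     * ends exactly at real_vram_size.
     */
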
+
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 80fa29c26e9e..397c6ccdb903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -84,6 +84,8 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+#define AMDGPU_MAX_MEM_RANGES 8
+
/*
* GMC page fault information
*/
@@ -394,6 +396,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
@@ -455,5 +458,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
int nps_mode);
void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
-
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 8179d0814db9..57101d24422f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 802743efa3b3..7d9bcb72e8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;
-
unsigned int i;
int r = 0;
@@ -154,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
+ af = &job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
@@ -161,6 +167,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
+ af = NULL;
}
if (!ring->sched.ready) {
@@ -282,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, job, fence_flags);
+ r = amdgpu_fence_emit(ring, f, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -304,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);
+ /* Save the wptr associated with this fence.
+ * This must be done last so that on a reset we
+ * know which ring contents to back up after the
+ * queue is reset.
+ */
+ amdgpu_fence_save_wptr(*f);
+
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 30f16968b578..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -218,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Both JPEG and VCN, as JPEG is only an alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+ /* For the rest of the IPs, no lookup is required.
+ * Assume 'logical instance == physical instance' for all configs.
+ */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
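For illustration (not part of the patch), amdgpu_populate_ip_map() compacts a sparse physical-instance mask into dense logical indices, and amdgpu_logical_to_dev_inst() walks the result back. A standalone sketch with a hypothetical mask of 0xa (physical instances 1 and 3):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MAX_INST 8

int main(void)
{
	int dev_inst[MAX_INST];
	unsigned int mask = 0xa;	/* hypothetical: instances 1 and 3 */
	int l = 0, i;

	while (mask) {
		i = ffs(mask) - 1;
		dev_inst[l++] = i;
		mask &= ~(1U << i);
	}
	for (; l < MAX_INST; l++)
		dev_inst[l] = -1;

	printf("logical 0 -> physical %d\n", dev_inst[0]);	/* 1 */
	printf("logical 1 -> physical %d\n", dev_inst[1]);	/* 3 */
	return 0;
}

amdgpu_logical_to_dev_mask() is the same lookup applied bit by bit over a whole mask.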
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
new file mode 100644
index 000000000000..2490fd322aec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 13c60cac4261..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
{
u16 ctrl;
@@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
- DRM_DEBUG("amdgpu: irq initialized.\n");
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
return 0;
free_vectors:
@@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
(client_id == SOC15_IH_CLIENTID_ISP)) &&
@@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, &entry);
if (r < 0)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
else if (r)
handled = true;
} else {
- DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
src_id, client_id);
}
@@ -620,7 +624,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
/* When the threshold is reached, the interrupt source may not be enabled. Return -EINVAL. */
- if (amdgpu_ras_is_rma(adev))
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
return -EINVAL;
if (!adev->irq.installed)
@@ -732,7 +736,7 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
&amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 04c0b4fa17a4..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 43fc941dfa57..9cddbf50442a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,6 +33,8 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
+
/**
* isp_hw_init - start and test isp block
*
@@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
+{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * does a GART allocation to generate a GPU address for the BO, making it
+ * accessible through the GART aperture for the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device, which is external to
+ * the drm device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs a BO used internally by isp.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * which is external to the drm device, to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
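A hedged sketch of how a V4L2 isp driver might pair the two exports above; the wrapper names and calling context are hypothetical, only isp_user_buffer_alloc()/isp_user_buffer_free() come from the patch:

/* hypothetical V4L2-side wrappers around the exports above */
static int isp_v4l2_map_dmabuf(struct device *isp_dev, struct dma_buf *dmabuf,
			       void **buf_obj, u64 *buf_addr)
{
	int ret;

	ret = isp_user_buffer_alloc(isp_dev, dmabuf, buf_obj, buf_addr);
	if (ret)
		return ret;

	/* ... program *buf_addr into the ISP HW via the GART aperture ... */
	return 0;
}

static void isp_v4l2_unmap_dmabuf(void *buf_obj)
{
	isp_user_buffer_free(buf_obj);
}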
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * which is external to the drm device, to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp kernel BO to free
+ * @gpu_addr: GPU addr of isp kernel BO
+ * @cpu_addr: CPU addr of isp kernel BO
+ *
+ * Unmaps and unpins an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device,
+ * which is external to the drm device, to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
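Similarly for the kernel-BO pair; a hedged sketch, with the fw-ring wrapper names being hypothetical (the 32 KiB ISP_MC_ADDR_ALIGN is applied inside the alloc):

/* hypothetical firmware-ring wrappers around the exports above */
static int isp_fw_ring_init(struct device *isp_dev, u64 size,
			    void **bo, u64 *gpu_addr, void **cpu_addr)
{
	return isp_kernel_buffer_alloc(isp_dev, size, bo, gpu_addr, cpu_addr);
}

static void isp_fw_ring_fini(void **bo, u64 *gpu_addr, void **cpu_addr)
{
	isp_kernel_buffer_free(bo, gpu_addr, cpu_addr);
}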
+
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 4f3b7b5d9c1f..d6f4ffa4c97c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -28,16 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
#define ISP_REGS_OFFSET_END 0x629A4
struct amdgpu_isp;
-struct isp_platform_data {
- void *adev;
- u32 asic_type;
- resource_size_t base_rmmio_size;
-};
-
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -54,6 +51,7 @@ struct amdgpu_isp {
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
};
extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ddb9d3269357..e6061d45f142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info *ti;
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
- int idx;
- int r;
+ int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -124,53 +125,30 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
/* attempt a per ring reset */
if (unlikely(adev->debug_disable_gpu_ring_reset)) {
dev_err(adev->dev, "Ring reset disabled by debug mask\n");
- } else if (amdgpu_gpu_recovery && ring->funcs->reset) {
- bool is_guilty;
-
- dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
- /* stop the scheduler, but don't mess with the
- * bad job yet because if ring reset fails
- * we'll fall back to full GPU reset.
- */
- drm_sched_wqueue_stop(&ring->sched);
-
- /* for engine resets, we need to reset the engine,
- * but individual queues may be unaffected.
- * check here to make sure the accounting is correct.
- */
- if (ring->funcs->is_guilty)
- is_guilty = ring->funcs->is_guilty(ring);
- else
- is_guilty = true;
-
- if (is_guilty)
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
- r = amdgpu_ring_reset(ring, job->vmid);
+ } else if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
if (!r) {
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_stop(&ring->sched, s_job);
- if (is_guilty) {
- atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
- }
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched, 0);
- dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
- dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
}
+
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
@@ -198,13 +176,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
if (num_ibs == 0)
return -EINVAL;
@@ -222,7 +202,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -232,7 +213,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 931fed8892cc..2f302266662b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -91,7 +91,8 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index dda29132dfb2..82d58ac7afb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -463,7 +463,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
sizeof(uint32_t), GFP_KERNEL);
if (!adev->jpeg.ip_dump) {
- DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n");
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
return -ENOMEM;
}
adev->jpeg.reg_list = reg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d2ce7d86dbc8..8a76960803c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
@@ -161,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
@@ -399,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -411,6 +412,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.gfx_ring[i].sched.ready &&
!adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -420,6 +427,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.compute_ring[i].sched.ready &&
!adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -429,6 +442,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->sdma.instance[i].ring.sched.ready &&
!adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -570,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
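For illustration (not part of the patch), userq_num_slots is simply the population count of the enabled MES HQD mask bits across pipes. A standalone sketch with hypothetical pipe masks, using __builtin_popcount() in place of the kernel's hweight32():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gfx_hqd_mask[2] = { 0xff, 0x0f };	/* hypothetical */
	uint32_t num_slots = 0;
	int i;

	for (i = 0; i < 2; i++)
		num_slots += (uint32_t)__builtin_popcount(gfx_hqd_mask[i]);
	printf("userq_num_slots = %u\n", num_slots);	/* 12 */
	return 0;
}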
@@ -1395,6 +1415,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
goto error_pasid;
+ amdgpu_debugfs_vm_init(file_priv);
+
r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
if (r)
goto error_pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 6fa9fa11c8f3..135598502c8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -47,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
/* Bitmap for dynamic allocation of kernel doorbells */
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
if (!mes->doorbell_bitmap) {
- DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
return -ENOMEM;
}
@@ -256,7 +256,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev)
r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to suspend all gangs");
+ dev_err(adev->dev, "failed to suspend all gangs");
return r;
}
@@ -280,7 +280,7 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to resume all gangs");
+ dev_err(adev->dev, "failed to resume all gangs");
return r;
}
@@ -304,7 +304,7 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to map legacy queue\n");
+ dev_err(adev->dev, "failed to map legacy queue\n");
return r;
}
@@ -329,7 +329,7 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to unmap legacy queue\n");
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
return r;
}
@@ -361,7 +361,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reset legacy queue\n");
+ dev_err(adev->dev, "failed to reset legacy queue\n");
return r;
}
@@ -469,7 +469,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes set shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
return -EINVAL;
}
@@ -493,7 +494,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -507,7 +508,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes flush shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
return -EINVAL;
}
@@ -519,7 +521,7 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index d085687a47ea..e56ba93a8df6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,6 +53,15 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
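A hedged sketch of the intended call pattern for the new helper (the wrapper is hypothetical):

static u64 pcie_replay_count_get(struct amdgpu_device *adev)
{
	if (!amdgpu_nbio_is_replay_cnt_supported(adev))
		return 0;

	return amdgpu_nbio_get_pcie_replay_count(adev);
}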
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 79c2f807b9fe..b528de6a01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 73403744331a..122a88294883 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
@@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
@@ -351,7 +352,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
-EXPORT_SYMBOL(amdgpu_bo_create_kernel);
/**
* amdgpu_bo_create_isp_user - create user BO for isp
@@ -420,7 +420,6 @@ error_unreserve:
return r;
}
-EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -524,7 +523,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
-EXPORT_SYMBOL(amdgpu_bo_free_kernel);
/**
* amdgpu_bo_free_isp_user - free BO for isp use
@@ -547,7 +545,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
}
amdgpu_bo_unref(&bo);
}
-EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -939,7 +936,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -967,7 +964,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
@@ -1018,7 +1015,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.pin_count)
return;
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
@@ -1263,7 +1260,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
- if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
@@ -1473,6 +1470,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
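For illustration (not part of the patch), the FB-aperture address adds the XGMI node offset to the BO's VRAM offset and sign-extends into the canonical MC space. A standalone sketch; fb_start, the segment size, the node id, the page shift and the 48-bit GMC hole bounds are all assumptions:

#include <stdint.h>
#include <stdio.h>

#define GMC_HOLE_START 0x0000800000000000ULL	/* assumed hole bounds */
#define GMC_HOLE_END   0xffff800000000000ULL

static uint64_t sign_extend(uint64_t addr)
{
	if (addr >= GMC_HOLE_START)
		addr |= GMC_HOLE_END;
	return addr;
}

int main(void)
{
	uint64_t fb_start = 0x0000008000000000ULL;	/* hypothetical */
	uint64_t node_segment_size = 0x0000004000000000ULL;
	uint32_t physical_node_id = 1;
	uint64_t bo_start_page = 0x1000;	/* tbo.resource->start */

	uint64_t fb_base = fb_start + physical_node_id * node_segment_size;
	uint64_t offset = (bo_start_page << 12) + fb_base;	/* PAGE_SHIFT=12 */

	printf("FB aperture addr: 0x%llx\n",
	       (unsigned long long)sign_extend(offset));
	return 0;
}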
+
+/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 375448627f7b..c316920f3450 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -304,6 +304,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 98cc9c450192..0bd51a04be79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
psp_v14_0_set_psp_funcs(psp);
break;
case IP_VERSION(14, 0, 5):
@@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
@@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+ "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
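A hedged sketch of the new flags interface: poll quietly on the fast path, then retry once with the timeout message enabled. The wrapper, register index and values are placeholders; only psp_wait_for() and the PSP_WAITREG_* flags come from the patch:

static int psp_poll_quiet_then_loud(struct psp_context *psp,
				    uint32_t reg_index, uint32_t reg_val,
				    uint32_t mask)
{
	int ret;

	/* first pass without the timeout message */
	ret = psp_wait_for(psp, reg_index, reg_val, mask,
			   PSP_WAITREG_NOVERBOSE);
	if (ret)	/* final attempt logs on timeout */
		ret = psp_wait_for(psp, reg_index, reg_val, mask, 0);

	return ret;
}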
@@ -654,6 +662,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "BOOT_CFG";
case GFX_CMD_ID_CONFIG_SQ_PERFMON:
return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
default:
return "UNKNOWN CMD";
}
@@ -871,6 +883,8 @@ static int psp_tmr_init(struct psp_context *psp)
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
@@ -984,6 +998,93 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
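For illustration (not part of the patch), the 64-bit reservation base is assembled from the hi/lo response words; a standalone sketch with illustrative register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x4f, lo = 0xc0000000;	/* illustrative */
	uint64_t addr = (uint64_t)hi << 32 | lo;

	printf("reserve base: 0x%llx\n", (unsigned long long)addr);
	/* prints 0x4fc0000000 */
	return 0;
}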
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
+ return 0;
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
+ if (ret)
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+ dev_warn(adev->dev, "reserve fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
@@ -1270,6 +1371,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -2337,11 +2443,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
return false;
}
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
@@ -2440,6 +2562,14 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 428adc7f741d..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -51,6 +51,17 @@
#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+/* Command register bit 31 set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Checking against
+ * both the status bits and the response state helps to detect a command
+ * failure or other unexpected cases, such as a dropped device reading all 0xFFs.
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -123,6 +134,9 @@ enum psp_reg_prog_id {
PSP_REG_LAST
};
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */
+
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
@@ -521,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
@@ -588,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
-
+int psp_update_fw_reservation(struct psp_context *psp);
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 9c8829bd5a58..540817e296da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1107,6 +1107,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info->de_count, blk_name);
}
} else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
@@ -2854,6 +2857,13 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
} else {
if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
return -EINVAL;
@@ -2885,6 +2895,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
struct eeprom_table_record *bps, struct ras_err_data *err_data,
enum amdgpu_memory_partition nps)
{
+ int i = 0;
enum amdgpu_memory_partition save_nps;
save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
@@ -2894,6 +2905,13 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
} else {
if (bps->address) {
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
@@ -3003,6 +3021,15 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
return 0;
}
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
@@ -3294,7 +3321,6 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
uint64_t de_queried_count;
uint32_t new_detect_count, total_detect_count;
uint32_t need_query_count = poison_creation_count;
- bool query_data_timeout = false;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
@@ -3323,21 +3349,13 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
if (timeout) {
- if (!--timeout) {
- query_data_timeout = true;
+ if (!--timeout)
break;
- }
msleep(1);
}
}
} while (total_detect_count < need_query_count);
- if (query_data_timeout) {
- dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
- (need_query_count - total_detect_count));
- return -ENOENT;
- }
-
if (total_detect_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
@@ -3488,8 +3506,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
control = &con->eeprom_control;
ret = amdgpu_ras_eeprom_init(control);
- if (ret)
- return ret;
+ control->is_eeprom_valid = !ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
@@ -3498,10 +3515,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
adev->umc.ras->get_retire_flip_bits)
adev->umc.ras->get_retire_flip_bits(adev);
- if (control->ras_num_recs) {
+ if (control->ras_num_recs && control->is_eeprom_valid) {
ret = amdgpu_ras_load_bad_pages(adev);
- if (ret)
- return ret;
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
amdgpu_dpm_send_hbm_bad_pages_num(
adev, control->ras_num_bad_pages);
@@ -3520,7 +3539,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
}
- return ret;
+ return 0;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
@@ -4414,8 +4433,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
- if (ras)
+ if (ras) {
ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
}
void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2c58e09e56f9..9bda9ad13f88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -277,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table header:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
} else if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_HEADER_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
res = -EIO;
} else {
res = 0;
@@ -323,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
return -ENOMEM;
}
@@ -338,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
} else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_V2_1_INFO_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
res = -EIO;
} else {
res = 0;
@@ -476,6 +479,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
control->ras_fri = 0;
amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
@@ -607,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Writing %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short write, return error.
*/
- DRM_ERROR("Wrote %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -761,18 +766,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
- control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
- control->tbl_rai.health_percent = 0;
- }
-
if ((amdgpu_bad_page_threshold != -1) &&
- (amdgpu_bad_page_threshold != -2))
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
ras->is_rma = true;
-
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
+ }
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -787,8 +791,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("allocating memory for table of size %d bytes failed\n",
- control->tbl_hdr.tbl_size);
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
res = -ENOMEM;
goto Out;
}
@@ -800,12 +805,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("EEPROM failed reading records:%d\n",
- res);
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
goto Out;
} else if (res < buf_size) {
- DRM_ERROR("EEPROM read %d out of %d bytes\n",
- res, buf_size);
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
res = -EIO;
goto Out;
}
@@ -866,11 +870,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not append 0 records\n");
+ dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
} else if (num > control->ras_max_record_count) {
- DRM_ERROR("cannot append %d records than the size of table %d\n",
- num, control->ras_max_record_count);
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
return -EINVAL;
}
@@ -924,13 +929,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Reading %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short read, return error.
*/
- DRM_ERROR("Read %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -964,11 +969,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return 0;
if (num == 0) {
- DRM_ERROR("will not read 0 records\n");
+ dev_err(adev->dev, "will not read 0 records\n");
return -EINVAL;
} else if (num > control->ras_num_recs) {
- DRM_ERROR("too many records to read:%d available:%d\n",
- num, control->ras_num_recs);
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
return -EINVAL;
}
@@ -1300,7 +1305,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Out of memory checking RAS table checksum.\n");
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
return -ENOMEM;
}
@@ -1309,7 +1315,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
control->ras_header_offset,
buf, buf_size);
if (res < buf_size) {
- DRM_ERROR("Partial read for checksum, res:%d\n", res);
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
/* On partial reads, return -EIO.
*/
if (res >= 0)
@@ -1334,7 +1340,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
return -ENOMEM;
}
@@ -1346,7 +1353,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_info_offset,
buf, RAS_TABLE_V2_1_INFO_SIZE);
if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
res = res >= 0 ? -EIO : res;
goto Out;
}
@@ -1387,7 +1395,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
return res >= 0 ? -EIO : res;
}
@@ -1452,8 +1461,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
control->ras_num_mca_recs * adev->umc.retire_unit;
if (hdr->header == RAS_TABLE_HDR_VAL) {
- DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_bad_pages);
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1521,3 +1531,31 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
return res < 0 ? res : 0;
}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras)
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table checksum mismatch or error:%d, trying to recover\n",
+ res);
+ if (!amdgpu_ras_eeprom_reset_table(control) &&
+ !amdgpu_ras_save_bad_pages(adev, NULL) &&
+ !__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeeded\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+}
\ No newline at end of file
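A likely call site for the new helper is the post-reset path, once the EEPROM is reachable again; a minimal sketch under that assumption (the actual caller is not part of this hunk):

/* Hypothetical sketch -- re-validate the RAS table after a reset. */
static void example_post_reset_ras_check(struct amdgpu_device *adev)
{
        /* On checksum failure the helper resets the table, replays the
         * cached bad pages, and marks the EEPROM invalid if the table
         * still fails to verify. */
        amdgpu_ras_eeprom_check_and_recover(adev);
}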
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ec6d7ea37ad0..ebfca4cb5688 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -114,6 +114,8 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+
+ bool is_eeprom_valid;
};
/*
@@ -159,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6ac0ce361a2d..a5c3f64cbce6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Unlike amdgpu_ring_alloc(), this doesn't check the max_dw
+ * limit, as we may be re-emitting several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
+
/**
 * amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -333,6 +356,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Initialize cached_rptr to 0 */
ring->cached_rptr = 0;
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
+
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
@@ -342,6 +371,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
@@ -385,6 +415,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
@@ -758,3 +790,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
return true;
}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the fence of the bad job */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
+ return 0;
+}
+
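Taken together, the two helpers let an IP backend implement its per-queue reset callback as a thin wrapper around the hardware-specific step; a minimal sketch, where example_engine_hw_reset() is a hypothetical stand-in for the real IP reset:

static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
                              struct amdgpu_fence *timedout_fence)
{
        int r;

        /* stop the scheduler and back up the non-guilty commands */
        amdgpu_ring_reset_helper_begin(ring, timedout_fence);
        r = example_engine_hw_reset(ring); /* hypothetical IP-specific reset */
        if (r)
                return r;
        /* test the ring, complete the guilty fence, re-emit the backup */
        return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}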
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
+{
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
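Callers can gate a per-queue reset attempt on the advertised capability before taking this path, e.g. (sketch, using the driver's existing AMDGPU_RESET_TYPE_PER_QUEUE mask):

        if (!amdgpu_ring_is_reset_type_supported(ring,
                                                 AMDGPU_RESET_TYPE_PER_QUEUE))
                return -EOPNOTSUPP; /* fall back to a full adapter reset */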
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index e1f25218943a..7670f5d82b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -118,6 +118,7 @@ struct amdgpu_fence_driver {
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -141,6 +142,12 @@ struct amdgpu_fence {
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+ uint32_t seq;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
@@ -148,6 +155,8 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
+void amdgpu_fence_save_wptr(struct dma_fence *fence);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -157,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
- unsigned flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -268,9 +277,9 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
- int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
- bool (*is_guilty)(struct amdgpu_ring *ring);
};
/**
@@ -284,6 +293,9 @@ struct amdgpu_ring {
struct amdgpu_bo *ring_obj;
uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
@@ -425,7 +437,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
-#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -550,4 +562,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 9b54a1ece447..8b8a04138711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -534,71 +534,48 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
- int r = -EOPNOTSUPP;
-
- switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
- case IP_VERSION(4, 4, 2):
- case IP_VERSION(4, 4, 4):
- case IP_VERSION(4, 4, 5):
- /* For SDMA 4.x, use the existing DPM interface for backward compatibility,
- * we need to convert the logical instance ID to physical instance ID before reset.
- */
- r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
- break;
- case IP_VERSION(5, 0, 0):
- case IP_VERSION(5, 0, 1):
- case IP_VERSION(5, 0, 2):
- case IP_VERSION(5, 0, 5):
- case IP_VERSION(5, 2, 0):
- case IP_VERSION(5, 2, 2):
- case IP_VERSION(5, 2, 4):
- case IP_VERSION(5, 2, 5):
- case IP_VERSION(5, 2, 6):
- case IP_VERSION(5, 2, 3):
- case IP_VERSION(5, 2, 1):
- case IP_VERSION(5, 2, 7):
- if (sdma_instance->funcs->soft_reset_kernel_queue)
- r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
- break;
- default:
- break;
- }
- return r;
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+
+ return -EOPNOTSUPP;
}
/**
* amdgpu_sdma_reset_engine - Reset a specific SDMA engine
* @adev: Pointer to the AMDGPU device
* @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: skip stopping/starting the kernel
+ * queues; the caller handles them itself
*
* Returns: 0 on success, or a negative error code on failure.
*/
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
{
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
struct amdgpu_ring *page_ring = &sdma_instance->page;
- bool gfx_sched_stopped = false, page_sched_stopped = false;
mutex_lock(&sdma_instance->engine_reset_mutex);
- /* Stop the scheduler's work queue for the GFX and page rings if they are running.
- * This ensures that no new tasks are submitted to the queues while
- * the reset is in progress.
- */
- if (!amdgpu_ring_sched_ready(gfx_ring)) {
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
drm_sched_wqueue_stop(&gfx_ring->sched);
- gfx_sched_stopped = true;
- }
- if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_stop(&page_ring->sched);
- page_sched_stopped = true;
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
}
- if (sdma_instance->funcs->stop_kernel_queue)
+ if (sdma_instance->funcs->stop_kernel_queue) {
sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
+ }
/* Perform the SDMA reset for the specified instance */
ret = amdgpu_sdma_soft_reset(adev, instance_id);
@@ -607,20 +584,25 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
goto exit;
}
- if (sdma_instance->funcs->start_kernel_queue)
+ if (sdma_instance->funcs->start_kernel_queue) {
sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
+ }
exit:
- /* Restart the scheduler's work queue for the GFX and page rings
- * if they were stopped by this function. This allows new tasks
- * to be submitted to the queues after the reset is complete.
- */
- if (!ret) {
- if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
drm_sched_wqueue_start(&gfx_ring->sched);
- }
- if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_start(&page_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
}
}
mutex_unlock(&sdma_instance->engine_reset_mutex);
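A per-ring reset callback that already quiesces the scheduler through the ring reset helpers would pass caller_handles_kernel_queues=true, so the engine reset does not stop/start the work queues or force-complete fences a second time; a rough sketch, assuming the SDMA instance index is ring->me:

static int example_sdma_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
                                   struct amdgpu_fence *timedout_fence)
{
        int r;

        amdgpu_ring_reset_helper_begin(ring, timedout_fence);
        /* true: this caller owns scheduler stop/start and fence completion */
        r = amdgpu_sdma_reset_engine(ring->adev, ring->me, true);
        if (r)
                return r;
        return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}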
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index e5f8951bbb6f..34311f32be4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -172,7 +172,8 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 11dd2e0f7979..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
TRACE_EVENT(amdgpu_reset_reg_dumps,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9c5df35f05b7..27ab4e754b2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -299,7 +299,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -934,7 +935,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
@@ -1060,7 +1061,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
- } else if (ttm->sg && gtt->gobj->import_attach) {
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
@@ -1781,7 +1782,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
&ctx->c2p_bo,
NULL);
if (ret) {
- DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
return ret;
}
@@ -1793,7 +1794,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
adev, adev->gmc.real_vram_size - reserve_size,
reserve_size, &adev->mman.fw_reserved_memory, NULL);
if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
NULL, NULL);
return ret;
@@ -1864,13 +1865,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
@@ -1878,7 +1880,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
return r;
}
@@ -1958,7 +1960,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
(unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on TTM limit
@@ -1981,10 +1983,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
if (adev->flags & AMD_IS_APU) {
@@ -1995,40 +1997,40 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
- DRM_ERROR("Failed initializing doorbell heap.\n");
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
}
/* Create a doorbell page for kernel use */
r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
return r;
}
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
@@ -2060,6 +2062,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
@@ -2089,7 +2093,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
/**
@@ -2121,8 +2125,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
return;
}
@@ -2130,8 +2135,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
goto error_free_entity;
}
} else {
@@ -2202,7 +2208,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
int r;
if (!direct_submit && !ring->sched.ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -2237,7 +2244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
error_free:
amdgpu_job_free(job);
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2356,7 +2363,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int r;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
@@ -2416,7 +2424,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
default:
- DRM_ERROR("Trying to evict invalid memory type\n");
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 450e4bf093b7..2309df3f68a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -86,6 +86,7 @@ struct amdgpu_mman {
uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index eaddc441c51a..a0b50a8ac9c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1159,6 +1159,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1397,8 +1400,8 @@ bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
if (adev->pdev->device == kicker_device_list[i].device &&
- adev->pdev->revision == kicker_device_list[i].revision)
- return true;
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
}
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 295e7186e156..c3ace8030530 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -318,6 +318,10 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
@@ -343,6 +347,46 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp,
return -EACCES;
}
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
@@ -352,6 +396,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
const struct amdgpu_userq_funcs *uq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_db_info db_info;
+ char *queue_name;
bool skip_map_queue;
uint64_t index;
int qid, r = 0;
@@ -475,6 +520,18 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
}
}
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
args->out.queue_id = qid;
@@ -664,7 +721,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
int ret;
- flush_work(&fpriv->evf_mgr.suspend_work.work);
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
mutex_lock(&uq_mgr->userq_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index ec040c2fd6c9..b1ca91b7cda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -65,6 +65,7 @@ struct amdgpu_usermode_queue {
struct dma_fence *last_fence;
u32 xcp_id;
int priority;
+ struct dentry *debugfs_queue;
};
struct amdgpu_userq_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index a86616c6deef..c2a983ff23c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -239,8 +239,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
fence = &userq_fence->base;
userq_fence->fence_drv = fence_drv;
- dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
- fence_drv->context, seq);
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
amdgpu_userq_fence_driver_get(fence_drv);
dma_fence_get(fence);
@@ -334,7 +334,6 @@ static void amdgpu_userq_fence_release(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_userq_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_userq_fence_get_driver_name,
.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
.signaled = amdgpu_userq_fence_signaled,
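dma_fence_init64() is the dedicated initializer for timelines with native 64-bit seqnos and replaces the old use_64bit_seqno flag in dma_fence_ops; the pattern, in a minimal sketch (my_fence_ops is a placeholder):

        spin_lock_init(&f->lock);
        /* seqno is a full u64; the core handles ordering across wraparound */
        dma_fence_init64(&f->base, &my_fence_ops, &f->lock,
                         fence_context, seq);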
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index c8885c3d54b3..f1f67521c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -1451,3 +1452,78 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
return ret;
}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues because
+ * resetting the engine resets all queues in that case. With
+ * unified queues we have one queue per engine.
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
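A VCN backend without unified queues would then plug the helper straight into its ring functions; a sketch, with all other callbacks elided:

static const struct amdgpu_ring_funcs example_vcn_dec_ring_funcs = {
        /* ...emit/test/ib callbacks elided... */
        .reset = amdgpu_vcn_ring_reset,
};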
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 83adf81defc7..0bc0a94d7cf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -330,7 +330,9 @@ struct amdgpu_vcn_inst {
struct dpg_pause_state *new_state);
int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
bool using_unified_queue;
+ struct mutex engine_reset_mutex;
};
struct amdgpu_vcn_ras {
@@ -552,5 +554,7 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
-
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 577c6194db78..3da3ebb1d9a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -152,8 +152,10 @@ enum AMDGIM_REG_ACCESS_FLAG {
AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
- /* Use PSP to program L1_TLB_CNTL*/
+ /* Use PSP to program L1_TLB_CNTL */
AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
struct amdgim_pf2vf_info_v1 {
@@ -301,6 +303,9 @@ struct amdgpu_virt {
union amd_sriov_ras_caps ras_telemetry_en_caps;
struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
+
+ /* hibernate and resume with a different VF feature set on XGMI-enabled systems */
+ bool is_xgmi_node_migrate_enabled;
};
struct amdgpu_video_codec_info;
@@ -343,6 +348,10 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
@@ -386,6 +395,10 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3911c78f8282..d5c0637d7392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -622,7 +622,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_warn_ratelimited("Evicted user BO is not reserved\n");
if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
+ pr_warn_ratelimited("pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
@@ -765,6 +765,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
+ struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -830,6 +831,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
+ /* tie this fence to the job's scheduler context for reset handling */
+ af = container_of(fence, struct amdgpu_fence, base);
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
}
if (vm_flush_needed) {
@@ -1271,8 +1275,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- if (obj->import_attach && bo_va->is_xgmi) {
- struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
+ struct dma_buf *dma_buf = obj->dma_buf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1631,7 +1635,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
* validation
*/
if (vm->is_compute_context &&
- bo_va->base.bo->tbo.base.import_attach &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
(!bo_va->base.bo->tbo.resource ||
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
@@ -2395,10 +2399,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
else
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
- vm_size, adev->vm_manager.num_level + 1,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
}
/**
@@ -2447,7 +2452,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2507,11 +2513,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2564,8 +2570,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2607,7 +2613,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_create_task_info(vm);
if (r)
- DRM_DEBUG("Failed to create task info for VM\n");
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2658,8 +2664,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2774,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
- ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
@@ -2982,7 +2988,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%d)\n", r);
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
@@ -3156,3 +3162,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f3ad687125ad..fd086efd8457 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -668,4 +667,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index b256cbc2bc27..2c88d5fd87da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -66,7 +66,10 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
- to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+ struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
+
+ WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
+ ares->flags |= DRM_BUDDY_CLEARED;
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 322816805bfb..c417f8689220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -218,15 +218,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}
-int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
- int mode;
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return true;
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return xcp_mgr->mode;
+ return true;
- if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
+ xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
+ return true;
+
+ return false;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
return xcp_mgr->mode;
if (!(flags & AMDGPU_XCP_FL_LOCKED))
@@ -445,6 +457,222 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
+ } else {
+ dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
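Conceptually the selection runs when a context entity is created; the calling pattern, sketched with approximate variable names:

        struct drm_gpu_scheduler **scheds;
        unsigned int num_scheds;
        int r;

        r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
                                     &num_scheds, &scheds);
        if (r)
                return r; /* no partition exposes this IP at this priority */
        /* scheds/num_scheds then feed drm_sched_entity_init() */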
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx,
+ struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+ break;
+ default:
+ dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+ return;
+ }
+
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+ ring->name, sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ amdgpu_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return amdgpu_xcp_sched_list_update(adev);
+}
+
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
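The pre/post helpers are meant to bracket the actual mode programming; a caller would follow roughly this sequence (sketch):

        r = amdgpu_xcp_pre_partition_switch(xcp_mgr, AMDGPU_XCP_OPS_KFD);
        if (r)
                return r;
        /* ...program the new partition mode via xcp_mgr->funcs... */
        r = amdgpu_xcp_post_partition_switch(xcp_mgr, AMDGPU_XCP_OPS_KFD);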
/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 454b33f889fb..70a0f8400b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -39,6 +39,8 @@
#define AMDGPU_XCP_NO_PARTITION (~0)
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
struct amdgpu_fpriv;
enum AMDGPU_XCP_IP_BLOCK {
@@ -144,10 +146,6 @@ struct amdgpu_xcp_mgr_funcs {
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
- int (*select_scheds)(struct amdgpu_device *adev,
- u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
- int (*update_partition_sched_list)(struct amdgpu_device *adev);
};
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -176,19 +174,18 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
-
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds);
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
-#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->select_scheds ? \
- (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
-#define amdgpu_xcp_update_partition_sched_list(adev) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
- (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
-
static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
if (!xcp_mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index d9ad37711c3e..1ede308a7c67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1771,16 +1771,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
+ /* 25 GT/s */
+ adev->gmc.xgmi.max_speed = 25;
adev->gmc.xgmi.max_width = 16;
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
+ /* 32 GT/s */
+ adev->gmc.xgmi.max_speed = 32;
adev->gmc.xgmi.max_width = 16;
break;
default:
break;
}
}
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width)
+{
+ adev->gmc.xgmi.max_speed = max_speed;
+ adev->gmc.xgmi.max_width = max_width;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index f994be985f42..bba0b26fee8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,12 +25,6 @@
#include <drm/task_barrier.h>
#include "amdgpu_ras.h"
-enum amdgpu_xgmi_link_speed {
- XGMI_SPEED_16GT = 16,
- XGMI_SPEED_25GT = 25,
- XGMI_SPEED_32GT = 32
-};
-
struct amdgpu_hive_info {
struct kobject kobj;
uint64_t hive_id;
@@ -97,7 +91,7 @@ struct amdgpu_xgmi {
struct ras_common_if *ras_if;
bool connected_to_cpu;
struct amdgpu_xgmi_ras *ras;
- enum amdgpu_xgmi_link_speed max_speed;
+ uint16_t max_speed;
uint8_t max_width;
};
@@ -130,4 +124,6 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+void amdgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width);
#endif
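With the fixed enum gone, max_speed is a plain GT/s value that platform code can update at runtime instead of being limited to the three enumerators. A hypothetical caller (the top_info names are illustrative, not from this patch):

	/* e.g. after a PSP/XGMI topology query reports per-link capabilities */
	amdgpu_xgmi_set_max_speed_width(adev, top_info.link_speed_gts,
					top_info.link_width);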
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 92ca13097aaa..33edad1f9dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -113,7 +113,8 @@ union amd_sriov_reg_access_flags {
uint32_t vf_reg_access_mmhub : 1;
uint32_t vf_reg_access_gc : 1;
uint32_t vf_reg_access_l1_tlb_cntl : 1;
- uint32_t reserved : 28;
+ uint32_t vf_reg_access_sq_config : 1;
+ uint32_t reserved : 27;
} flags;
uint32_t all;
};
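The new bit is consumed later in this series via amdgpu_sriov_reg_access_sq_config() (see gfx_v9_4_3_constants_init below). A sketch of that accessor, assuming it follows the existing vf_reg_access_* pattern in amdgpu_virt.h; the AMDGIM_FEATURE_* flag name here is an assumption:

#define amdgpu_sriov_reg_access_sq_config(adev) \
	(amdgpu_sriov_vf((adev)) && \
	 ((adev)->virt.reg_access & AMDGIM_FEATURE_SQ_CONFIG_REG_ACCESS))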
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 1c083304ae77..914cf4bfb033 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -29,12 +29,11 @@
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
+#include "amdgpu_ip.h"
#define XCP_INST_MASK(num_inst, xcp_id) \
(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
-#define AMDGPU_XCP_OPS_KFD (1 << 0)
-
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
int i;
@@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
- return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
- uint32_t inst_idx, struct amdgpu_ring *ring)
-{
- int xcp_id;
- enum AMDGPU_XCP_IP_BLOCK ip_blk;
- uint32_t inst_mask;
-
- ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
- if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
- (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
- return;
-
- inst_mask = 1 << inst_idx;
-
- switch (ring->funcs->type) {
- case AMDGPU_HW_IP_GFX:
- case AMDGPU_RING_TYPE_COMPUTE:
- case AMDGPU_RING_TYPE_KIQ:
- ip_blk = AMDGPU_XCP_GFX;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ip_blk = AMDGPU_XCP_SDMA;
- break;
- case AMDGPU_RING_TYPE_VCN_ENC:
- case AMDGPU_RING_TYPE_VCN_JPEG:
- ip_blk = AMDGPU_XCP_VCN;
- break;
- default:
- DRM_ERROR("Not support ring type %d!", ring->funcs->type);
- return;
- }
-
- for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
- if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
- ring->xcp_id = xcp_id;
- dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
- ring->xcp_id);
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
- break;
- }
- }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
- struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- unsigned int sel_xcp_id)
-{
- unsigned int *num_gpu_sched;
-
- num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
- .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
- adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
- .sched[(*num_gpu_sched)++] = &ring->sched;
- DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
- sel_xcp_id, ring->funcs->type,
- ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
- struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- int i;
-
- for (i = 0; i < MAX_XCP; i++) {
- atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
- memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
- }
-
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- ring = adev->rings[i];
- if (!ring || !ring->sched.ready || ring->no_scheduler)
- continue;
-
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
- /* VCN may be shared by two partitions under CPX MODE in certain
- * configs.
- */
- if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- aqua_vanjaram_xcp_vcn_shared(adev))
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
- }
-
- return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->num_rings; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
- ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
- aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
- else
- aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
- }
-
- return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
-static int aqua_vanjaram_select_scheds(
- struct amdgpu_device *adev,
- u32 hw_ip,
- u32 hw_prio,
- struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds,
- struct drm_gpu_scheduler ***scheds)
-{
- u32 sel_xcp_id;
- int i;
-
- if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
- u32 least_ref_cnt = ~0;
-
- fpriv->xcp_id = 0;
- for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
- u32 total_ref_cnt;
-
- total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
- if (total_ref_cnt < least_ref_cnt) {
- fpriv->xcp_id = i;
- least_ref_cnt = total_ref_cnt;
- }
- }
- }
- sel_xcp_id = fpriv->xcp_id;
-
- if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
- *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
- *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
- atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
- DRM_DEBUG("Selected partition #%d", sel_xcp_id);
- } else {
- DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
- return -ENOENT;
- }
-
- return 0;
-}
-
-static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- int8_t inst)
-{
- int8_t dev_inst;
-
- switch (block) {
- case GC_HWIP:
- case SDMA0_HWIP:
- /* Both JPEG and VCN as JPEG is only alias of VCN */
- case VCN_HWIP:
- dev_inst = adev->ip_map.dev_inst[block][inst];
- break;
- default:
- /* For rest of the IPs, no look up required.
- * Assume 'logical instance == physical instance' for all configs. */
- dev_inst = inst;
- break;
- }
-
- return dev_inst;
-}
-
-static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- uint32_t mask)
-{
- uint32_t dev_mask = 0;
- int8_t log_inst, dev_inst;
-
- while (mask) {
- log_inst = ffs(mask) - 1;
- dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
- dev_mask |= (1 << dev_inst);
- mask &= ~(1 << log_inst);
- }
-
- return dev_mask;
-}
-
-static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type ip_block,
- uint32_t inst_mask)
-{
- int l = 0, i;
-
- while (inst_mask) {
- i = ffs(inst_mask) - 1;
- adev->ip_map.dev_inst[ip_block][l++] = i;
- inst_mask &= ~(1 << i);
- }
- for (; l < HWIP_MAX_INSTANCE; l++)
- adev->ip_map.dev_inst[ip_block][l] = -1;
-}
-
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
-{
- u32 ip_map[][2] = {
- { GC_HWIP, adev->gfx.xcc_mask },
- { SDMA0_HWIP, adev->sdma.sdma_mask },
- { VCN_HWIP, adev->vcn.inst_mask },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
- aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
-
- adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
- adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
-}
-
/* Fixed pattern for smn addressing on different AIDs:
* bit[34]: indicate cross AID access
* bit[33:32]: indicate target AID id
@@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->nbio.funcs->get_compute_partition_mode) {
mode = adev->nbio.funcs->get_compute_partition_mode(adev);
- if (mode != derv_mode)
+ if (mode != derv_mode) {
dev_warn(
adev->dev,
"Mismatch in compute partition mode - reported : %d derived : %d",
mode, derv_mode);
+ if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_device_bus_status_check(adev);
+ }
}
return mode;
@@ -593,72 +367,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return false;
}
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- /* TODO:
- * Stop user queues and threads, and make sure GPU is empty of work.
- */
-
- if (flags & AMDGPU_XCP_OPS_KFD)
- amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
- return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- int ret = 0;
-
- if (flags & AMDGPU_XCP_OPS_KFD) {
- amdgpu_amdkfd_device_probe(xcp_mgr->adev);
- amdgpu_amdkfd_device_init(xcp_mgr->adev);
- /* If KFD init failed, return failure */
- if (!xcp_mgr->adev->kfd.init_complete)
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void
-__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
-{
- struct amdgpu_device *adev = xcp_mgr->adev;
-
- xcp_mgr->supp_xcp_modes = 0;
-
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_QPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 6:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_TPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 4:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- /* this seems only existing in emulation phase */
- case 2:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 1:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
-
- default:
- break;
- }
-}
-
static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
int mode;
@@ -705,7 +413,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
- ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
if (ret)
goto unlock;
@@ -718,7 +426,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcps = num_xcc / num_xcc_per_xcp;
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
- ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
if (!ret)
__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
@@ -801,9 +509,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
- .select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list =
- &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -818,7 +523,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
- __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
+ amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
@@ -858,7 +563,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
if (ret)
return ret;
- aqua_vanjaram_ip_map_init(adev);
+ amdgpu_ip_map_init(adev);
return 0;
}
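The pre/post partition-switch steps removed above now live in common XCP code, with AMDGPU_XCP_OPS_KFD presumably moving to amdgpu_xcp.h alongside them. A sketch of the generic helpers, assuming they keep the deleted aqua_vanjaram bodies unchanged:

int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO carried over from the original: stop user queues and
	 * make sure the GPU is drained of work before switching. */
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* if KFD init failed, report it */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}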
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 75ea071744eb..7bd506f06eb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4952,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
+
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -9046,21 +9050,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
-static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, mmSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -9522,7 +9511,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9532,15 +9523,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
u64 addr;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
- if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
@@ -9560,12 +9550,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
0, 1, 0x20);
gfx_v10_0_ring_emit_reg_wait(kiq_ring,
SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
- kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9575,11 +9562,25 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9587,12 +9588,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -9603,9 +9603,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
0, 0);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9641,13 +9640,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -9882,7 +9880,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
@@ -9923,7 +9920,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
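Two patterns recur in the reset paths above and in the gfx v11/v12 and v9 hunks that follow. First, each KIQ submission is now tested for completion before the ring lock is dropped, and the unmap and remap submissions become separate lock sections, so a concurrent KIQ user cannot interleave with a half-finished reset. Second, the open-coded amdgpu_ring_test_ring() at the end gives way to amdgpu_ring_reset_helper_begin()/_end(), which bracket the reset around the timed-out fence (their internals are not part of this hunk). The lock pattern, reduced to a sketch:

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
	amdgpu_ring_commit(kiq_ring);
	r = amdgpu_ring_test_ring(kiq_ring);	/* tested while still locked */
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
	if (r)
		return r;
	/* ...wait for deactivation, then a second lock section remaps... */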
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index ec9b84f92d46..c01c241a1b06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1806,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
- (adev->gfx.mec_fw_version >= 2410)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2410) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
break;
}
@@ -6283,21 +6288,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -6811,13 +6801,14 @@ static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
@@ -6840,7 +6831,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -6973,13 +6964,14 @@ static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
@@ -7000,7 +6992,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7236,7 +7228,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
@@ -7278,7 +7269,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 1234c8d64e20..09bf72237d1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1542,10 +1542,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
if ((adev->gfx.me_fw_version >= 2660) &&
- (adev->gfx.mec_fw_version >= 2920)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2920) &&
+ !amdgpu_sriov_vf(adev)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
+ break;
+ default:
+ break;
}
if (!adev->enable_mes_kiq) {
@@ -4690,21 +4694,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5307,13 +5296,14 @@ static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
@@ -5335,7 +5325,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -5421,13 +5411,14 @@ static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
@@ -5448,7 +5439,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
@@ -5526,7 +5517,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
@@ -5565,7 +5555,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index da0534ff1271..2aa323dab34e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4884,76 +4884,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
-static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
-static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5003,7 +4933,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
- .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bc983ecf3d99..367449d8061b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6340,34 +6340,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
-static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6844,48 +6816,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
-static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6951,7 +6881,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
- .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ad9be3656653..20b30f4b3c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2410,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
@@ -7171,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v9_0_ring_emit_wreg(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
- return -ENOMEM;
- gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v9_0_ring_emit_reg_wait(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
- gfx_v9_0_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -7225,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -7280,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
DRM_ERROR("fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7496,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
- .reset = gfx_v9_0_reset_kgq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c233edf60569..51babf5c78c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1148,13 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if (adev->gfx.mec_fw_version >= 155) {
+ if ((adev->gfx.mec_fw_version >= 155) &&
+ !amdgpu_sriov_vf(adev)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
case IP_VERSION(9, 5, 0):
- if (adev->gfx.mec_fw_version >= 21) {
+ if ((adev->gfx.mec_fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -1349,7 +1351,9 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
/* ToDo: GC 9.4.4 */
case IP_VERSION(9, 4, 3):
- if (adev->gfx.mec_fw_version >= 184)
+ if (adev->gfx.mec_fw_version >= 184 &&
+ (amdgpu_sriov_reg_access_sq_config(adev) ||
+ !amdgpu_sriov_vf(adev)))
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
break;
case IP_VERSION(9, 5, 0):
@@ -3552,7 +3556,8 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
}
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
@@ -3560,12 +3565,11 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -3591,7 +3595,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
pipe_reset:
- if(r) {
+ if (r) {
+ if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
+ return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
@@ -3612,14 +3618,14 @@ pipe_reset:
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
enum amdgpu_gfx_cp_ras_mem_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index cb25f7f0dfc1..6c03bf9f1ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
int i;
@@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
/* In the case squeezing vram into GART aperture, we don't use
* FB aperture and AGP aperture. Disable them.
*/
- if (adev->gmc.pdb0_bo) {
+ if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) {
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index a3e2787501f1..7923f491cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -164,10 +164,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72211409227b..f15d691e9a20 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -134,10 +134,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b645d3e6a6c8..de763105fdfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -127,10 +127,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 99ca08e9bdb5..b45fa0cea9d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1458,9 +1458,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
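The same fault printout was duplicated, with slightly different wording, across the gmc v8/v9/v10/v11/v12 interrupt handlers; it collapses into one helper. A minimal sketch of amdgpu_vm_print_task_info(), assuming it reproduces the dev_err the call sites carried:

void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
			       struct amdgpu_task_info *task_info)
{
	dev_err(adev->dev, " Process %s pid %d thread %s pid %d\n",
		task_info->process_name, task_info->tgid,
		task_info->task_name, task_info->pid);
}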
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 282197f4ffb1..c4d69cf4e06c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -78,8 +78,6 @@
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
-#define MAX_MEM_RANGES 8
-
static const char * const gfxhub_client_ids[] = {
"CB",
"DB",
@@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
(0x001d43e0 + 0x00001800),
};
-static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev)
-{
- return !!adev->aid_mask;
-}
-
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -641,10 +634,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " for process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -652,7 +642,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -801,7 +791,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1131,8 +1121,8 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1142,7 +1132,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
@@ -1172,7 +1161,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
- if (mapping->bo_va->is_xgmi)
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
@@ -1264,7 +1253,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
+ gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
+ flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1385,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static enum amdgpu_memory_partition
-gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
-{
- enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
-
- if (adev->nbio.funcs->get_memory_partition_mode)
- mode = adev->nbio.funcs->get_memory_partition_mode(adev,
- supp_modes);
-
- return mode;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
-{
- switch (adev->gmc.num_mem_partitions) {
- case 0:
- return UNKNOWN_MEMORY_PARTITION_MODE;
- case 1:
- return AMDGPU_NPS1_PARTITION_MODE;
- case 2:
- return AMDGPU_NPS2_PARTITION_MODE;
- case 4:
- return AMDGPU_NPS4_PARTITION_MODE;
- default:
- return AMDGPU_NPS1_PARTITION_MODE;
- }
-
- return AMDGPU_NPS1_PARTITION_MODE;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
-{
- if (amdgpu_sriov_vf(adev))
- return gmc_v9_0_query_vf_memory_partition(adev);
-
- return gmc_v9_0_get_memory_partition(adev, NULL);
-}
-
static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
{
if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
@@ -1446,7 +1396,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
- .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
@@ -1553,7 +1503,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1599,7 +1549,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return;
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
/* Mode detected by hardware and supported modes available */
if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
@@ -1635,7 +1585,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1722,7 +1672,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
amdgpu_gmc_sysvm_location(adev, mc);
} else {
amdgpu_gmc_vram_location(adev, mc, base);
@@ -1837,7 +1787,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
adev->gmc.vmid0_page_table_depth = 1;
adev->gmc.vmid0_page_table_block_size = 12;
} else {
@@ -1863,7 +1813,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
r = amdgpu_gmc_pdb0_alloc(adev);
}
@@ -1885,188 +1835,6 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
-{
- enum amdgpu_memory_partition mode;
- u32 supp_modes;
- bool valid;
-
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
-
- /* Mode detected by hardware not present in supported modes */
- if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
- !(BIT(mode - 1) & supp_modes))
- return false;
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- case AMDGPU_NPS1_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 1);
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 2);
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 3 ||
- adev->gmc.num_mem_partitions == 4);
- break;
- default:
- valid = false;
- }
-
- return valid;
-}
-
-static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
-{
- int i;
-
- /* Check if node with id 'nid' is present in 'node_ids' array */
- for (i = 0; i < num_ids; ++i)
- if (node_ids[i] == nid)
- return true;
-
- return false;
-}
-
-static void
-gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- struct amdgpu_numa_info numa_info;
- int node_ids[MAX_MEM_RANGES];
- int num_ranges = 0, ret;
- int num_xcc, xcc_id;
- uint32_t xcc_mask;
-
- num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- xcc_mask = (1U << num_xcc) - 1;
-
- for_each_inst(xcc_id, xcc_mask) {
- ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
- if (ret)
- continue;
-
- if (numa_info.nid == NUMA_NO_NODE) {
- mem_ranges[0].size = numa_info.size;
- mem_ranges[0].numa.node = numa_info.nid;
- num_ranges = 1;
- break;
- }
-
- if (gmc_v9_0_is_node_present(node_ids, num_ranges,
- numa_info.nid))
- continue;
-
- node_ids[num_ranges] = numa_info.nid;
- mem_ranges[num_ranges].numa.node = numa_info.nid;
- mem_ranges[num_ranges].size = numa_info.size;
- ++num_ranges;
- }
-
- adev->gmc.num_mem_partitions = num_ranges;
-}
-
-static void
-gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- enum amdgpu_memory_partition mode;
- u32 start_addr = 0, size;
- int i, r, l;
-
- mode = gmc_v9_0_query_memory_partition(adev);
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 0;
- break;
- case AMDGPU_NPS1_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 1;
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 2;
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- if (adev->flags & AMD_IS_APU)
- adev->gmc.num_mem_partitions = 3;
- else
- adev->gmc.num_mem_partitions = 4;
- break;
- default:
- adev->gmc.num_mem_partitions = 1;
- break;
- }
-
- /* Use NPS range info, if populated */
- r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- &adev->gmc.num_mem_partitions);
- if (!r) {
- l = 0;
- for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
- if (mem_ranges[i].range.lpfn >
- mem_ranges[i - 1].range.lpfn)
- l = i;
- }
-
- } else {
- if (!adev->gmc.num_mem_partitions) {
- dev_err(adev->dev,
- "Not able to detect NPS mode, fall back to NPS1");
- adev->gmc.num_mem_partitions = 1;
- }
- /* Fallback to sw based calculation */
- size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
- size /= adev->gmc.num_mem_partitions;
-
- for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
- mem_ranges[i].range.fpfn = start_addr;
- mem_ranges[i].size =
- ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
- mem_ranges[i].range.lpfn = start_addr + size - 1;
- start_addr += size;
- }
-
- l = adev->gmc.num_mem_partitions - 1;
- }
-
- /* Adjust the last one */
- mem_ranges[l].range.lpfn =
- (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
- mem_ranges[l].size =
- adev->gmc.real_vram_size -
- ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
-}
-
-static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
-{
- bool valid;
-
- adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
- sizeof(struct amdgpu_mem_partition_info),
- GFP_KERNEL);
- if (!adev->gmc.mem_partitions)
- return -ENOMEM;
-
- /* TODO : Get the range from PSP/Discovery for dGPU */
- if (adev->gmc.is_app_apu)
- gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
- else
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
-
- if (amdgpu_sriov_vf(adev))
- valid = true;
- else
- valid = gmc_v9_0_validate_partition_info(adev);
- if (!valid) {
- /* TODO: handle invalid case */
- dev_WARN(adev->dev,
- "Mem ranges not matching with hardware config");
- }
-
- return 0;
-}
-
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
@@ -2088,7 +1856,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
+ if (amdgpu_is_multi_aid(adev)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2238,8 +2006,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
- r = gmc_v9_0_init_mem_ranges(adev);
+ if (amdgpu_is_multi_aid(adev)) {
+ r = amdgpu_gmc_init_mem_ranges(adev);
if (r)
return r;
}
@@ -2267,7 +2035,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.first_kfd_vmid =
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev)) ?
+ amdgpu_is_multi_aid(adev)) ?
3 :
8;
@@ -2279,7 +2047,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2289,7 +2057,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2363,7 +2131,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
amdgpu_gmc_init_pdb0(adev);
if (adev->gart.bo == NULL) {
@@ -2521,7 +2289,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
* information again.
*/
if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
}
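gmc_v9 now asks a common predicate instead of testing connected_to_cpu directly, and the gfxhub change earlier in this patch selects vram_start over fb_start when amdgpu_virt_xgmi_migrate_enabled(). Taken together, the predicate plausibly reads as follows (a sketch, not taken from this hunk):

static inline bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
{
	/* pdb0-backed system VM: CPU-coherent XGMI, or SR-IOV XGMI migration */
	return adev->gmc.xgmi.connected_to_cpu ||
	       amdgpu_virt_xgmi_migrate_enabled(adev);
}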
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 574880d67009..a887df520414 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -29,6 +29,12 @@
#include "amdgpu.h"
#include "isp_v4_1_1.h"
+#define ISP_PERFORMANCE_STATE_LOW 0
+#define ISP_PERFORMANCE_STATE_HIGH 1
+
+#define ISP_HIGH_PERFORMANCE_XCLK 788
+#define ISP_HIGH_PERFORMANCE_ICLK 788
+
static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
@@ -56,17 +62,137 @@ static struct gpiod_lookup_table isp_sensor_gpio_table = {
},
};
+static int isp_poweroff(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0);
+}
+
+static int isp_poweron(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0);
+}
+
+static int isp_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ u32 iclk, xclk;
+ int ret;
+
+ switch (state) {
+ case ISP_PERFORMANCE_STATE_HIGH:
+		xclk = ISP_HIGH_PERFORMANCE_XCLK;
+		iclk = ISP_HIGH_PERFORMANCE_ICLK;
+ break;
+ case ISP_PERFORMANCE_STATE_LOW:
+ /* isp runs at default lowest clock-rate on power-on, do nothing */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n",
+ xclk, state, ret);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n",
+ iclk, state, ret);
+ return ret;
+ }
+
+ return 0;
+}
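The three callbacks above make the ISP a self-contained genpd provider: the genpd core powers the domain through the SMU and forwards performance-state requests to the clock code. A minimal consumer sketch, assuming the child device has already been attached to this domain (hw_init below does the attach); ISP_PERFORMANCE_STATE_* are the states defined above and the rest is stock runtime-PM/genpd API:

	#include <linux/pm_domain.h>
	#include <linux/pm_runtime.h>

	/* Sketch: raise ISP clocks for streaming, drop them again when idle. */
	static int example_isp_stream_on(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);	/* ends up in isp_poweron() */
		if (ret)
			return ret;

		/* the genpd core routes this to isp_set_performance_state() */
		return dev_pm_genpd_set_performance_state(dev,
							  ISP_PERFORMANCE_STATE_HIGH);
	}

	static void example_isp_stream_off(struct device *dev)
	{
		dev_pm_genpd_set_performance_state(dev, ISP_PERFORMANCE_STATE_LOW);
		pm_runtime_put(dev);			/* may end up in isp_poweroff() */
	}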
+
+static int isp_genpd_add_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to add\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n",
+ pdev->mfd_cell->name, ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to add */
+ return 0;
+}
+
+static int isp_genpd_remove_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to remove\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n",
+ pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to remove */
+ return 0;
+}
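Both walkers return 0 on a type-name mismatch by design: device_for_each_child() aborts the iteration at the first non-zero return from the callback, so "skip this child" has to look like success, and only a genuine pm_genpd_add_device()/pm_genpd_remove_device() failure stops the walk. That is what the "Continue to add/remove" comments are hinting at.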
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
+ const struct software_node *amd_camera_node, *isp4_node;
struct amdgpu_device *adev = isp->adev;
+ struct acpi_device *acpi_dev;
int idx, int_idx, num_res, r;
- u8 isp_dev_hid[ACPI_ID_LEN];
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
- r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
+ r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
if (r) {
drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
/* allow GPU init to progress */
@@ -74,18 +200,28 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
}
/* add GPIO resources required for OMNI5C10 sensor */
- if (!strcmp("OMNI5C10", isp_dev_hid)) {
+ if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) {
gpiod_add_lookup_table(&isp_gpio_table);
gpiod_add_lookup_table(&isp_sensor_gpio_table);
}
isp_base = adev->rmmio_base;
+ isp->ispgpd.name = "ISP_v_4_1_1";
+ isp->ispgpd.power_off = isp_poweroff;
+ isp->ispgpd.power_on = isp_poweron;
+ isp->ispgpd.set_performance_state = isp_set_performance_state;
+
+ r = pm_genpd_init(&isp->ispgpd, NULL, true);
+ if (r) {
+ drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r);
+ return -EINVAL;
+ }
+
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r);
goto failure;
}
@@ -95,19 +231,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r);
goto failure;
}
+ amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
+ isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
+
/* initialize isp platform data */
isp->isp_pdata->adev = (void *)adev;
isp->isp_pdata->asic_type = adev->asic_type;
@@ -136,14 +273,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].swnode = isp4_node;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r);
goto failure;
}
@@ -162,8 +299,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_gpio_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp gpio res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r);
goto failure;
}
@@ -179,10 +315,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[2].platform_data = isp->isp_pdata;
isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
+ /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
- drm_err(&adev->ddev,
- "%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r);
+ goto failure;
+ }
+
+ r = device_for_each_child(isp->parent, &isp->ispgpd,
+ isp_genpd_add_device);
+ if (r) {
+ drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r);
+ goto failure;
+ }
+
+ r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1);
+ if (r) {
+ drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r);
goto failure;
}
@@ -201,6 +350,9 @@ failure:
static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
{
+ device_for_each_child(isp->parent, NULL,
+ isp_genpd_remove_device);
+
mfd_remove_devices(isp->parent);
kfree(isp->isp_res);
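Because hw_init above hangs the ACPI-derived isp4 software node off the capture cell (isp_cell[0].swnode), the spawned MFD child can read its board and sensor description through the generic device-property API instead of walking ACPI tables itself. A hedged consumer-side sketch; the "sensor-lanes" property name is hypothetical:

	#include <linux/platform_device.h>
	#include <linux/property.h>

	/* Sketch: inside the capture child driver's probe(); real property
	 * names come from the ACPI-provided amd_camera node hierarchy. */
	static int example_read_isp_props(struct platform_device *pdev)
	{
		u32 lanes;
		int ret;

		ret = device_property_read_u32(&pdev->dev, "sensor-lanes", &lanes);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "ISP sensor uses %u lanes\n", lanes);
		return 0;
	}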
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 4cde8a8bcc83..58239c405fda 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -118,7 +118,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
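Every JPEG sw_init in this series now builds supported_reset the same way: start from the soft/full mask derived from the ring, then advertise per-queue reset only on bare metal (SR-IOV guests cannot reset queues themselves). A sketch of what the base helper plausibly reports; the real amdgpu_get_soft_full_reset_mask() lives in common code and may differ:

	/* Sketch: full-adapter reset is always possible; soft reset only if
	 * the ring actually implements a .reset callback. */
	static u32 example_soft_full_reset_mask(struct amdgpu_ring *ring)
	{
		u32 mask = AMDGPU_RESET_TYPE_FULL;

		if (ring && ring->funcs && ring->funcs->reset)
			mask |= AMDGPU_RESET_TYPE_SOFT_RESET;

		return mask;
	}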
@@ -764,11 +767,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v2_0_stop(ring->adev);
- jpeg_v2_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v2_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v2_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
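All the per-ring reset conversions in this series follow the pattern above: bracket the engine stop/start with amdgpu_ring_reset_helper_begin()/end() and pass through the fence that timed out. A sketch of what the two helpers plausibly do, under stated assumptions; the real implementations live in common ring code:

	/* Sketch only: assumed division of labour between the helpers. */
	static void example_reset_helper_begin(struct amdgpu_ring *ring,
					       struct amdgpu_fence *timedout_fence)
	{
		/* park the scheduler so nothing touches the ring during reset;
		 * presumably also backs up innocent, unprocessed commands */
		drm_sched_wqueue_stop(&ring->sched);
	}

	static int example_reset_helper_end(struct amdgpu_ring *ring,
					    struct amdgpu_fence *timedout_fence)
	{
		int r;

		r = amdgpu_ring_test_helper(ring);	/* did the engine come back? */
		if (r)
			return r;
		/* presumably: force-complete the guilty fence with an error,
		 * re-emit the backed-up commands, then restart the scheduler */
		drm_sched_wqueue_start(&ring->sched);
		return 0;
	}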
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 8b39e114f3be..3e2c389242db 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v2_5_stop_inst(ring->adev, ring->me);
jpeg_v2_5_start_inst(ring->adev, ring->me);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 2f8510c2986b..a44eb2667664 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v3_0_stop(ring->adev);
- jpeg_v3_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v3_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v3_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index f17ec5414fd6..da3ee69f1a3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EINVAL;
+ int r;
- jpeg_v4_0_stop(ring->adev);
- jpeg_v4_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 79e342d5ab28..b86288a69e7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -216,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
-	return 0;
+	return r;
}
@@ -242,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -1143,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
if (amdgpu_sriov_vf(ring->adev))
return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v4_0_3_core_stall_reset(ring);
jpeg_v4_0_3_start_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 974030a5c03c..481d1a2dbe5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_5_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_5_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
@@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_5_ring_reset,
};
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 31d213ccbe0a..e0a71909252b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- return 0;
+
+ return r;
}
/**
@@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v5_0_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v5_0_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
@@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_0_ring_reset,
};
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 3b6f65a25646..54523dc1f702 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -200,14 +200,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- return 0;
+ return r;
}
/**
@@ -226,8 +225,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -834,14 +832,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v5_0_1_core_stall_reset(ring);
jpeg_v5_0_1_init_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 76167fadb292..cc688ae79e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -76,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
u32 inst_mask;
int i;
@@ -95,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
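The LO32/HI32 register pair splits a 48-bit start address at bit 44, after dropping the 4 KiB page offset; the change above only swaps which base feeds the split. A small worked sketch of the encoding:

	/* Sketch: split a GART start address the way these registers expect.
	 * Example: start = 1ULL << 44 (16 TiB) gives lo = 0x0, hi = 0x1. */
	static inline void example_vm_start_parts(u64 start, u32 *lo, u32 *hi)
	{
		*lo = (u32)(start >> 12);	/* page-frame bits 43..12 */
		*hi = (u32)(start >> 44);	/* bits 47..44 */
	}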
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index a376f072700d..1c22bc11c1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -31,9 +31,6 @@
#define NPS_MODE_MASK 0x000000FFL
-/* Core 0 Port 0 counter */
-#define smnPCIEP_NAK_COUNTER 0x1A340218
-
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -467,22 +464,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
}
}
-static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
-{
- u32 val, nak_r, nak_g;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- /* Get the number of NAKs received and generated */
- val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
- nak_r = val & 0xFFFF;
- nak_g = val >> 16;
-
- /* Add the total number of NAKs, i.e the number of replays */
- return (nak_r + nak_g);
-}
-
#define MMIO_REG_HOLE_OFFSET 0x1A000
static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
@@ -524,7 +505,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
- .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f4a91b126c73..73f87131a7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -106,7 +106,9 @@ enum psp_gfx_cmd_id
/*IDs of performance monitoring/profiling*/
GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
/* Dynamic memory partitioninig (NPS mode change)*/
- GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */
+ GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */
};
/* PSP boot config sub-commands */
@@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg {
uint32_t boot_cfg; /* boot config data */
};
+/* Command-specific response for fw reserve info */
+struct psp_gfx_uresp_fw_reserve_info {
+ uint32_t reserve_base_address_hi;
+ uint32_t reserve_base_address_lo;
+ uint32_t reserve_size;
+};
+
/* Union of command-specific responses for GPCOM ring. */
union psp_gfx_uresp {
struct psp_gfx_uresp_reserved reserved;
struct psp_gfx_uresp_bootcfg boot_cfg;
struct psp_gfx_uresp_fwar_db_info fwar_db_info;
+ struct psp_gfx_uresp_fw_reserve_info fw_reserve_info;
};
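The new response packs the firmware reservation base as two 32-bit halves plus a size. A hypothetical helper to reassemble it on the driver side (name is illustrative):

	/* Sketch: rebuild the 64-bit FW reservation base from the response. */
	static inline u64 example_fw_reserve_base(
			const struct psp_gfx_uresp_fw_reserve_info *info)
	{
		return ((u64)info->reserve_base_address_hi << 32) |
		       info->reserve_base_address_lo;
	}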
/* Structure of GFX Response buffer.
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 145186a1e48f..3584b8c18fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 215543575f47..6cc05d36e359 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,11 +152,9 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that it is
ready, having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -252,8 +250,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -277,11 +275,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -317,13 +317,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
return ret;
@@ -347,8 +349,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
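The psp_wait_for() conversions in these files replace a trailing bool with a flags word and name the magic mailbox constants. The definitions below are inferred from the call sites they replace (e.g. the 0x80000000/0x8000FFFF pairs for TOS responses) and are assumptions for illustration, not copies of amdgpu_psp.h:

	/* Inferred: the old bool meant "wait until the value changes". */
	#define PSP_WAITREG_CHANGED	BIT(0)	/* wait for reg to differ from reg_val */
	#define PSP_WAITREG_NOVERBOSE	BIT(1)	/* poll quietly; caller logs/retries */

	/* Inferred TOS mailbox encodings (bit 31 = done, [15:0] = status): */
	#define MBOX_TOS_READY_FLAG	0x80000000
	#define MBOX_TOS_READY_MASK	0x80000000	/* only check the done bit */
	#define MBOX_TOS_RESP_FLAG	0x80000000
	#define MBOX_TOS_RESP_MASK	0x8000FFFF	/* done bit set, status clear */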
@@ -381,7 +384,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -395,7 +399,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
@@ -421,8 +426,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -601,7 +607,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -638,7 +644,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
index 5697760a819b..93787a90d598 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
@@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
@@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 80153f837470..4c6450d62299 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -82,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that it is ready, having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -97,7 +97,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
psp_gfxdrv_command_reg);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -118,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that it is ready, having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -133,8 +133,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
psp_gfxdrv_command_reg);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -163,7 +163,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -184,11 +184,13 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -219,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -233,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index ead616c11705..af4a7d7c4abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -182,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
ready, having bit 31 of C2PMSG_33 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
@@ -213,7 +213,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -362,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
if (!ret)
psp_v13_0_init_sos_version(psp);
@@ -384,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -393,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -430,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -460,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -524,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -677,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -714,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36);
@@ -739,8 +745,9 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -764,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -799,7 +806,7 @@ static int psp_v13_0_dump_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -926,8 +933,9 @@ static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index eaa5512a21da..5f39a2edcc95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that it is
ready, having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 256288c6cd78..36ef4a72ad1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -109,11 +109,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that it is
ready, having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -228,9 +226,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
- 0, true);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -248,8 +247,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
@@ -257,8 +257,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -294,13 +295,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -324,8 +327,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -388,8 +392,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -540,8 +545,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -577,8 +583,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
@@ -602,11 +609,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -629,8 +638,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp,
int ret;
/* Confirm PSP is ready to start */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f6b75e3e47ff..833830bc3e2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that it is ready, having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that it is ready, having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
/* Change IH ring for UMC */
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
@@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
}
static int psp_v3_1_ring_create(struct psp_context *psp,
@@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_101), 0x80000000,
- 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, 0);
} else {
/* Write low address of the ring to C2PMSG_69 */
@@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_64), 0x80000000,
- 0x8000FFFF, false);
-
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, 0);
}
return ret;
}
@@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 33ed2b158fcd..f38004e6064e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
dev_dbg_ratelimited(adev->dev,
" for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index bb82c652e4c0..36b1ca73c2ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -110,6 +110,8 @@ static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -1342,6 +1344,7 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
.stop_kernel_queue = &sdma_v4_4_2_stop_queue,
.start_kernel_queue = &sdma_v4_4_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
};
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
@@ -1653,38 +1656,17 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i
return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
}
-static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
-}
-
-static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- if (!adev->sdma.has_page_queue)
- return false;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
-}
-
-static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
u32 id = ring->me;
int r;
- if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- amdgpu_amdkfd_suspend(adev, false);
- r = amdgpu_sdma_reset_engine(adev, id);
- amdgpu_amdkfd_resume(adev, false);
-
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, id, false);
+ amdgpu_amdkfd_resume(adev, true);
return r;
}
@@ -1730,7 +1712,7 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- int i;
+ int i, r;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1747,7 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
return -ETIMEDOUT;
}
- return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
+
+ return r;
+}
+
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id)
+{
+	/* For SDMA 4.x, use the existing DPM interface for backward compatibility;
+	 * we need to convert the logical instance ID to the physical instance ID before reset.
+	 */
+ return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
}
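With stop/start/soft-reset split into amdgpu_sdma_funcs hooks, the shared engine-reset path can sequence them uniformly while each generation supplies its own mechanics (here the DPM mailbox, keyed by physical instance via GET_INST()). A sketch of the assumed dispatch order in the common helper; the funcs-pointer location is an assumption:

	/* Sketch: plausible shape of the common amdgpu_sdma_reset_engine(). */
	static int example_sdma_reset_engine(struct amdgpu_device *adev,
					     u32 instance_id)
	{
		/* per-instance funcs pointer is an assumed location */
		const struct amdgpu_sdma_funcs *funcs =
			adev->sdma.instance[instance_id].funcs;
		struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
		int r;

		r = funcs->stop_kernel_queue(ring);		/* drain and halt */
		if (r)
			return r;
		r = funcs->soft_reset_kernel_queue(adev, instance_id);
		if (r)
			return r;
		return funcs->start_kernel_queue(ring);		/* reprogram + resume */
	}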
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1889,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
if (task_info) {
dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
@@ -2144,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2177,7 +2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
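
The sdma_v4_4_2 changes route per-engine soft reset through an optional soft_reset_kernel_queue hook and translate the logical instance to a physical one before issuing the DPM reset. A standalone sketch of that dispatch shape (all names hypothetical, not the driver code):

#include <stdio.h>

struct dev;

/* Per-generation SDMA ops; the soft-reset hook is optional. */
struct sdma_funcs {
	int (*stop_kernel_queue)(struct dev *d, unsigned inst);
	int (*start_kernel_queue)(struct dev *d, unsigned inst);
	int (*soft_reset_kernel_queue)(struct dev *d, unsigned inst); /* may be NULL */
};

struct dev {
	const struct sdma_funcs *funcs;
	unsigned log_to_phys[4]; /* logical -> physical instance map */
};

static int v4_soft_reset(struct dev *d, unsigned inst)
{
	/* The DPM reset takes a physical-instance bitmask, so translate first. */
	unsigned phys = d->log_to_phys[inst];

	printf("dpm reset mask 0x%x\n", 1u << phys);
	return 0;
}

static const struct sdma_funcs v4_funcs = {
	.soft_reset_kernel_queue = v4_soft_reset,
};

int main(void)
{
	struct dev d = { .funcs = &v4_funcs, .log_to_phys = { 0, 2, 1, 3 } };

	if (d.funcs->soft_reset_kernel_queue)  /* optional hook, NULL elsewhere */
		return d.funcs->soft_reset_kernel_queue(&d, 1);
	return 1; /* generation without per-engine soft reset */
}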
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 37f4b5b4a098..7dc67a22a7a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1428,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
- if (adev->sdma.instance[0].fw_version >= 35)
+ if ((adev->sdma.instance[0].fw_version >= 35) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1539,17 +1540,27 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 inst_id = ring->me;
int r;
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
amdgpu_amdkfd_suspend(adev, true);
- r = amdgpu_sdma_reset_engine(adev, inst_id);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
@@ -1616,6 +1627,7 @@ static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
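
sdma_v5_0_reset_queue now brackets the engine reset between reset-helper begin/end calls and only runs the end-of-reset retest when the reset itself succeeded. A minimal sketch of that bracketing pattern, assuming stand-in helpers rather than the driver's:

#include <stdio.h>

struct ring { const char *name; };

/* Bracketing helpers in the spirit of amdgpu_ring_reset_helper_begin/end:
 * begin handles the timed-out fence bookkeeping, end re-tests the ring. */
static void reset_begin(struct ring *r) { printf("%s: begin reset\n", r->name); }
static int  reset_end(struct ring *r)   { printf("%s: ring retest\n", r->name); return 0; }

static int engine_reset(struct ring *r)
{
	(void)r;
	return 0; /* pretend the engine reset succeeded */
}

static int reset_queue(struct ring *r)
{
	int err;

	reset_begin(r);
	err = engine_reset(r);
	if (err)
		return err;      /* propagate; skip the retest on failure */
	return reset_end(r);     /* success path: ring must pass a retest */
}

int main(void)
{
	struct ring r = { .name = "sdma0" };

	return reset_queue(&r);
}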
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 0b40411b92a0..3bd44c24f692 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1347,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
- if (adev->sdma.instance[0].fw_version >= 76)
+ if ((adev->sdma.instance[0].fw_version >= 76) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
- if (adev->sdma.instance[0].fw_version >= 34)
+ if ((adev->sdma.instance[0].fw_version >= 34) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1452,17 +1454,27 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
return -ETIMEDOUT;
}
-static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 inst_id = ring->me;
int r;
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
amdgpu_amdkfd_suspend(adev, true);
- r = amdgpu_sdma_reset_engine(adev, inst_id);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
@@ -1532,6 +1544,7 @@ static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
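
As in the sdma_v5_0 hunk, per-queue reset support is now gated on both a minimum firmware version and bare-metal operation, since an SR-IOV virtual function must defer resets to the host. A compact sketch of that capability gating (thresholds illustrative):

#include <stdio.h>

#define RESET_TYPE_PER_QUEUE (1u << 0)

struct dev {
	unsigned fw_version;
	int is_vf;               /* running as an SR-IOV virtual function */
	unsigned supported_reset;
};

/* Advertise per-queue reset only when the firmware is new enough and the
 * device is not a VF. Sketch only; the real checks live in sw_init. */
static void init_reset_caps(struct dev *d, unsigned min_fw)
{
	if (d->fw_version >= min_fw && !d->is_vf)
		d->supported_reset |= RESET_TYPE_PER_QUEUE;
}

int main(void)
{
	struct dev bare = { .fw_version = 80, .is_vf = 0 };
	struct dev vf   = { .fw_version = 80, .is_vf = 1 };

	init_reset_caps(&bare, 76);
	init_reset_caps(&vf, 76);
	printf("bare-metal per-queue reset: %u, vf: %u\n",
	       bare.supported_reset & RESET_TYPE_PER_QUEUE,
	       vf.supported_reset & RESET_TYPE_PER_QUEUE);
	return 0;
}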
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index a9bdf8d61d6c..e6d8eddda2bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1355,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
- if (adev->sdma.instance[0].fw_version >= 21)
+ if ((adev->sdma.instance[0].fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev))
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1379,6 +1380,10 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
+ case IP_VERSION(6, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
case IP_VERSION(6, 0, 2):
if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
@@ -1387,6 +1392,22 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
+ case IP_VERSION(6, 1, 0):
+ if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 1):
+ if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 2):
+ if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 3):
+ if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
default:
break;
}
@@ -1550,29 +1571,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v6_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
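
The sw_init switch above pairs each IP version with a minimum firmware level before enabling user queues. The same policy can also be expressed as a lookup table; a sketch with illustrative version/threshold pairs, not the driver's actual values:

#include <stddef.h>
#include <stdio.h>

#define IP_VER(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

/* (IP version, minimum firmware) pairs, data-driven instead of a switch. */
static const struct { unsigned ip; unsigned min_fw; } uq_gate[] = {
	{ IP_VER(6, 0, 0), 24 },
	{ IP_VER(6, 0, 1), 18 },
	{ IP_VER(6, 1, 0), 14 },
};

static int userq_supported(unsigned ip, unsigned fw)
{
	for (size_t i = 0; i < sizeof(uq_gate) / sizeof(uq_gate[0]); i++)
		if (uq_gate[i].ip == ip)
			return fw >= uq_gate[i].min_fw;
	return 0; /* unknown version: keep user queues off */
}

int main(void)
{
	printf("6.0.1 fw17: %d\n", userq_supported(IP_VER(6, 0, 1), 17));
	printf("6.0.1 fw18: %d\n", userq_supported(IP_VER(6, 0, 1), 18));
	return 0;
}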
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 86903eccbd4e..b8b06d4c5882 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -802,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
-static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v7_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
/**
@@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
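
Both the v6.0 and v7.0 reset paths drop the linear search over adev->sdma.instance[] in favor of a bounds check on ring->me, which already records the instance index. A standalone sketch of the simplified lookup:

#include <stdio.h>

struct ring { unsigned me; /* instance index assigned at init time */ };

struct dev {
	unsigned num_instances;
	struct ring rings[4];
};

/* ring->me identifies the instance directly, so a range check replaces
 * the old loop that compared ring pointers. Sketch only. */
static int reset_queue(struct dev *d, struct ring *r)
{
	if (r->me >= d->num_instances) {
		fprintf(stderr, "sdma instance not found\n");
		return -1;
	}
	printf("resetting instance %u\n", r->me);
	return 0;
}

int main(void)
{
	struct dev d = { .num_instances = 2, .rings = { { .me = 0 }, { .me = 1 } } };

	return reset_queue(&d, &d.rings[1]);
}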
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index ef7c603b50ae..c8ac11a9cdef 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 148b651be7ca..68b4371df0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -98,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
+
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
@@ -213,6 +215,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].reset = vcn_v2_0_reset;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -233,6 +241,10 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -260,6 +272,8 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -1355,6 +1369,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_0_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_0_start(vinst);
+}
+
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2176,6 +2200,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
@@ -2205,6 +2230,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
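
vcn_v2_0_reset is a stop/start cycle with error propagation, and the generic .reset ring hook defers to it via amdgpu_vcn_ring_reset. The stop-then-start shape in a standalone sketch (stand-in functions):

#include <stdio.h>

struct vcn_inst { int id; };

static int vcn_stop(struct vcn_inst *v)  { printf("vcn%d stop\n", v->id);  return 0; }
static int vcn_start(struct vcn_inst *v) { printf("vcn%d start\n", v->id); return 0; }

static int vcn_reset(struct vcn_inst *v)
{
	int err = vcn_stop(v);

	if (err)
		return err;     /* don't restart an instance that failed to stop */
	return vcn_start(v);
}

int main(void)
{
	struct vcn_inst v = { .id = 0 };

	return vcn_reset(&v);
}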
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 58b527a6b795..bc30a5326866 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -102,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -404,8 +405,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ adev->vcn.inst[j].reset = vcn_v2_5_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -425,6 +432,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -455,6 +466,8 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -1816,6 +1829,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -1914,6 +1928,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1942,6 +1957,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_5_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_5_start(vinst);
+}
+
static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
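
The vcn_v2_5 hunks register the sysfs reset mask at the end of sw_init and remove it in sw_fini, keeping setup and teardown mirrored. A sketch of that paired lifecycle with stand-in resources:

#include <stdio.h>

/* Setup acquires A then B; teardown releases B then A, mirroring the sysfs
 * init at the end of sw_init and the fini in sw_fini above. Sketch only. */
static int  init_a(void) { puts("init A");  return 0; }
static void fini_a(void) { puts("fini A"); }
static int  init_b(void) { puts("init B");  return 0; }
static void fini_b(void) { puts("fini B"); }

static int sw_init(void)
{
	int err = init_a();

	if (err)
		return err;
	err = init_b();
	if (err)
		fini_a(); /* unwind what already succeeded */
	return err;
}

static void sw_fini(void)
{
	fini_b(); /* reverse order of sw_init */
	fini_a();
}

int main(void)
{
	if (sw_init())
		return 1;
	sw_fini();
	return 0;
}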
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 9fb0d5380589..4b8f4407047f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -289,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ adev->vcn.inst[i].reset = vcn_v3_0_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -306,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -338,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v3_0_stop(vinst);
+ if (r)
+ return r;
+ vcn_v3_0_enable_clock_gating(vinst);
+ vcn_v3_0_enable_static_power_gating(vinst);
+ return vcn_v3_0_start(vinst);
+}
+
static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
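
vcn_v3_0_reset differs from the v2.x variants by re-enabling clock gating and static power gating between stop and start, so the instance restarts from a known gated state. A sketch of that ordering with stand-in helpers:

#include <stdio.h>

static int  stop_inst(void)        { puts("stop");                 return 0; }
static void enable_cg(void)        { puts("clock gating on"); }
static void enable_static_pg(void) { puts("static power gating on"); }
static int  start_inst(void)       { puts("start");                return 0; }

static int reset_inst(void)
{
	int err = stop_inst();

	if (err)
		return err;
	enable_cg();          /* restore gating before bringing the block up */
	enable_static_pg();
	return start_inst();
}

int main(void)
{
	return reset_inst();
}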
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b5071f77f78d..1924e075b66f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -241,7 +241,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -1967,18 +1968,22 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
-static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v4_0_stop(vinst);
- vcn_v4_0_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
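
The rewritten vcn_v4_0_ring_reset propagates stop()/start() failures instead of discarding them, and only reaches the final ring retest on success. A minimal before/after sketch (error values are stand-ins):

#include <stdio.h>

static int stop_hw(void)   { return 0; }
static int start_hw(void)  { return 0; }
static int ring_test(void) { puts("ring test"); return 0; }

static int reset_checked(void)
{
	int err = stop_hw();

	if (err)
		return err;       /* previously ignored */
	err = start_hw();
	if (err)
		return err;       /* previously ignored */
	return ring_test();       /* only retest a ring that came back up */
}

int main(void)
{
	return reset_checked();
}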
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 5a33140f5723..2a3663b551af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -1594,18 +1594,16 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
int r = 0;
int vcn_inst;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
vcn_inst = GET_INST(VCN, ring->me);
r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
@@ -1620,9 +1618,8 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
vcn_v4_0_3_hw_init_inst(vinst);
vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
- r = amdgpu_ring_test_helper(ring);
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 16ade84facc7..caf2d95a85d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -220,7 +220,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -1465,18 +1466,22 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v4_0_5_stop(vinst);
- vcn_v4_0_5_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_5_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_5_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index f8e3f0b882da..07a6e9582880 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -198,7 +198,8 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
@@ -1192,18 +1193,22 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v5_0_0_stop(vinst);
- vcn_v5_0_0_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v5_0_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v5_0_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 62e88e5362e9..16e12c9913f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT))
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a2149afa5803..828a9ceef1e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index bf0854bd5555..7e749f9b6d69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -971,7 +971,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
kfd_smi_event_update_gpu_reset(node, false, reset_context);
}
- kgd2kfd_suspend(kfd, false);
+ kgd2kfd_suspend(kfd, true);
for (i = 0; i < kfd->num_nodes; i++)
kfd_signal_reset_event(kfd->nodes[i]);
@@ -1013,13 +1013,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return 0;
}
-bool kfd_is_locked(void)
+bool kfd_is_locked(struct kfd_dev *kfd)
{
+ uint8_t id = 0;
+ struct kfd_node *dev;
+
lockdep_assert_held(&kfd_processes_mutex);
- return (kfd_locked > 0);
+
+ /* check reset/suspend lock */
+ if (kfd_locked > 0)
+ return true;
+
+ if (kfd)
+ return kfd->kfd_dev_lock > 0;
+
+	/* check the lock on all cgroup-accessible devices */
+ while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
+ if (!dev || kfd_devcgroup_check_permission(dev))
+ continue;
+
+ if (dev->kfd->kfd_dev_lock > 0)
+ return true;
+ }
+
+ return false;
}
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
struct kfd_node *node;
int i;
@@ -1027,14 +1047,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
if (!kfd->init_complete)
return;
- /* for runtime suspend, skip locking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- /* For first KFD device suspend all the KFD processes */
- if (++kfd_locked == 1)
- kfd_suspend_all_processes();
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (suspend_proc)
+ kgd2kfd_suspend_process(kfd);
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
@@ -1042,7 +1056,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
}
}
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
int ret, i;
@@ -1055,14 +1069,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
- /* for runtime resume, skip unlocking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- if (--kfd_locked == 0)
- ret = kfd_resume_all_processes();
- WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (resume_proc)
+ ret = kgd2kfd_resume_process(kfd);
+
+ return ret;
+}
+
+void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return;
+
+ mutex_lock(&kfd_processes_mutex);
+	/* When the first KFD device suspends, suspend all KFD processes */
+ if (++kfd_locked == 1)
+ kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
+}
+
+int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ mutex_lock(&kfd_processes_mutex);
+ if (--kfd_locked == 0)
+ ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
return ret;
}
@@ -1442,24 +1478,53 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
kfd_get_num_sdma_engines(node);
}
-int kgd2kfd_check_and_lock_kfd(void)
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
+ struct kfd_process *p;
+ int r = 0, temp, idx;
+
mutex_lock(&kfd_processes_mutex);
- if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
- return -EBUSY;
+
+ if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
+ goto out;
+
+	/* Fail under system reset/resume, or while the kfd device is switching partitions. */
+ if (kfd_is_locked(kfd)) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ /*
+	 * Ensure all running processes are cgroup-excluded from the device before
+	 * the mode switch, i.e. no pdd was created on the process socket.
+ */
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->kfd != kfd)
+ continue;
+
+ r = -EBUSY;
+ goto proc_check_unlock;
+ }
}
- ++kfd_locked;
+proc_check_unlock:
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+out:
+ if (!r)
+ ++kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
- return 0;
+ return r;
}
-void kgd2kfd_unlock_kfd(void)
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
mutex_lock(&kfd_processes_mutex);
- --kfd_locked;
+ --kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
}
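
kgd2kfd_suspend_process/resume_process keep kfd_locked as a counted lock: the first suspend stops all KFD processes and the last resume restarts them, with the negative-count warning catching unbalanced calls. A userspace sketch with a pthread mutex standing in for kfd_processes_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int locked;

static void suspend_process(void)
{
	pthread_mutex_lock(&lock);
	if (++locked == 1)
		puts("suspending all processes"); /* only the first holder acts */
	pthread_mutex_unlock(&lock);
}

static void resume_process(void)
{
	pthread_mutex_lock(&lock);
	if (--locked == 0)
		puts("resuming all processes");   /* only the last holder acts */
	if (locked < 0)
		fprintf(stderr, "suspend/resume ref. error\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	suspend_process(); /* device 0 suspends everything */
	suspend_process(); /* device 1: count bump only */
	resume_process();
	resume_process();  /* last resume restarts everything */
	return 0;
}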
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 76359c6a3f3a..2d91027e2a74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2312,7 +2312,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
continue;
/* Reset engine and check. */
- if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) ||
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) ||
dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
!set_sdma_queue_as_reset(dqm, doorbell_off)) {
r = -ENOTRECOVERABLE;
@@ -2339,9 +2339,18 @@ reset_fail:
static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
{
+ struct amdgpu_device *adev = dqm->dev->adev;
+
while (halt_if_hws_hang)
schedule();
+ if (adev->debug_disable_gpu_ring_reset) {
+ dev_info_once(adev->dev,
+ "%s queue hung, but ring reset disabled",
+ is_sdma ? "sdma" : "compute");
+
+ return -EPERM;
+ }
if (!amdgpu_gpu_recovery)
return -ENOTRECOVERABLE;
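
reset_queues_on_hws_hang now refuses to reset when the debug knob is set, logging that decision only once per call site via dev_info_once. A plain-C once-only log gate of the same shape:

#include <stdio.h>

/* Each expansion gets its own static flag, so every call site logs at most
 * once, matching the dev_info_once semantics. Sketch only. */
#define info_once(...) do {              \
	static int printed;              \
	if (!printed) {                  \
		printed = 1;             \
		printf(__VA_ARGS__);     \
	}                                \
} while (0)

static int debug_disable_ring_reset = 1; /* stand-in for the debugfs knob */

static int maybe_reset(const char *what)
{
	if (debug_disable_ring_reset) {
		info_once("%s queue hung, but ring reset disabled\n", what);
		return -1; /* -EPERM in the driver */
	}
	return 0;
}

int main(void)
{
	maybe_reset("sdma");
	maybe_reset("sdma"); /* second hang: no duplicate log */
	return 0;
}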
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 2b294ada3ec0..82905f3e54dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1302,7 +1302,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index dbcb60eb54b2..1d170dc50df3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -23,7 +23,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d221c58dccc3..67694bcd9464 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -372,6 +372,9 @@ struct kfd_dev {
/* bitmap for dynamic doorbell allocation from doorbell object */
unsigned long *doorbell_bitmap;
+
+ /* for dynamic partitioning */
+ int kfd_dev_lock;
};
enum kfd_mempool {
@@ -1536,7 +1539,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
int kfd_send_exception_to_runtime(struct kfd_process *p,
unsigned int queue_id,
uint64_t error_reason);
-bool kfd_is_locked(void);
+bool kfd_is_locked(struct kfd_dev *kfd);
/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 722ac1662bdc..5be28c6c4f6a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
*/
mutex_lock(&kfd_processes_mutex);
- if (kfd_is_locked()) {
+ if (kfd_is_locked(NULL)) {
pr_debug("KFD is locked! Cannot create process");
process = ERR_PTR(-EINVAL);
goto out;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 83d9384ac815..a499449fcb06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -253,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
- if (task_info->pid)
+ if (task_info->task.pid)
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
- task_info->pid, task_info->task_name));
+ task_info->task.pid, task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -359,8 +359,8 @@ void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
kfd_smi_event_add(0, pdd->dev,
start ? KFD_SMI_EVENT_PROCESS_START :
KFD_SMI_EVENT_PROCESS_END,
- KFD_EVENT_FMT_PROCESS(task_info->pid,
- task_info->task_name));
+ KFD_EVENT_FMT_PROCESS(task_info->task.pid,
+ task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index faed84172dd4..8bc36f04b1b7 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f58fa5da7fe5..096b23ad4845 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1758,10 +1758,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
@@ -1774,6 +1775,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
@@ -1781,7 +1783,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
@@ -1847,7 +1849,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
goto error;
}
@@ -2037,7 +2039,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2053,7 +2055,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize freesync_module.\n");
+ "failed to initialize freesync_module.\n");
} else
drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
@@ -2064,7 +2066,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2075,7 +2077,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
else
drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
@@ -2085,20 +2087,20 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub aux callback");
goto error;
}
@@ -2107,7 +2109,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
dmub_aux_fused_io_callback, false)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2125,7 +2127,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dm_initialize_drm_device(adev)) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize sw for display support.\n");
+ "failed to initialize sw for display support.\n");
goto error;
}
@@ -2140,14 +2142,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize sw for display support.\n");
+ "failed to initialize sw for display support.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
@@ -2404,6 +2406,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@@ -2570,7 +2573,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
return -EINVAL;
}
@@ -3060,6 +3063,77 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_cache_state(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state)) {
+ r = PTR_ERR(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+ }
+
+ return adev->dm.cached_state ? 0 : r;
+}
+
+static void dm_destroy_cached_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct dm_plane_state *dm_new_plane_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i;
+
+ if (!dm->cached_state)
+ return;
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+}
+
+static void dm_complete(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ dm_destroy_cached_state(adev);
+}
+
static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3068,11 +3142,8 @@ static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
return 0;
WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
- return 0;
+ return dm_cache_state(adev);
}
static int dm_suspend(struct amdgpu_ip_block *ip_block)
@@ -3106,9 +3177,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
}
if (!adev->dm.cached_state) {
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ int r = dm_cache_state(adev);
+
+ if (r)
+ return r;
}
s3_handle_hdmi_cec(adev_to_drm(adev), true);
@@ -3295,12 +3367,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
@@ -3457,40 +3523,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
}
drm_connector_list_iter_end(&iter);
- /* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- new_crtc_state->active_changed = true;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- reset_freesync_config_for_crtc(dm_new_crtc_state);
- }
-
- /*
- * atomic_check is expected to create the dc states. We need to release
- * them here, since they were duplicated as part of the suspend
- * procedure.
- */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (dm_new_crtc_state->stream) {
- WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
- dc_stream_release(dm_new_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
- }
- dm_new_crtc_state->base.color_mgmt_changed = true;
- }
-
- for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (dm_new_plane_state->dc_state) {
- WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
- dc_plane_state_release(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- }
- }
-
- drm_atomic_helper_resume(ddev, dm->cached_state);
-
- dm->cached_state = NULL;
+ dm_destroy_cached_state(adev);
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
@@ -3539,6 +3572,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
+ .complete = dm_complete,
.is_idle = dm_is_idle,
.wait_for_idle = dm_wait_for_idle,
.check_soft_reset = dm_check_soft_reset,
@@ -4003,19 +4037,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4829,6 +4863,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
+ if (trace_amdgpu_dm_brightness_enabled()) {
+ trace_amdgpu_dm_brightness(__builtin_return_address(0),
+ user_brightness,
+ brightness,
+ caps->aux_support,
+ power_supply_is_system_supplied() > 0);
+ }
+
if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -7534,7 +7576,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = DC_FAIL_ATTACH_SURFACES;
if (dc_result == DC_OK)
- dc_result = dc_validate_global_state(dc, dc_state, true);
+ dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
cleanup:
if (dc_state)
@@ -7592,7 +7634,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
@@ -7859,6 +7901,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ enum drm_mode_status result;
+
+ result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
+ if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
+ drm_dbg_driver(encoder->dev,
+ "mode %dx%d@%dHz is not native, enabling scaling\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+ drm_mode_vrefresh(adjusted_mode));
+ dm_new_connector_state->scaling = RMX_FULL;
+ }
+ return 0;
+ }
+
if (!aconnector->mst_output_port)
return 0;
@@ -8316,7 +8375,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
- if (encoder)
+ if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
@@ -12156,7 +12216,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, true);
+ status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
if (status != DC_OK) {
drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
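
dm_cache_state folds the duplicated suspend-and-check-IS_ERR sequence from dm_prepare_suspend and dm_suspend into one helper; the kernel encodes the error inside the returned pointer itself. A tiny ERR_PTR-style emulation of that pointer/error conversion (userspace sketch, not the kernel macros):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
	/* Error cookies occupy the top 4095 addresses, as in the kernel. */
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static void *helper_suspend(void)
{
	return err_ptr(-ENOMEM); /* pretend the suspend failed */
}

static int cache_state(void **cached)
{
	*cached = helper_suspend();
	if (is_err(*cached)) {
		int err = (int)ptr_err(*cached);

		*cached = NULL;   /* never leave an error cookie behind */
		return err;
	}
	return 0;
}

int main(void)
{
	void *state;
	int err = cache_state(&state);

	printf("cache_state: %d, state=%p\n", err, state);
	return 0;
}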
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d7d92f9911e4..b937da0a4e4a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -636,8 +636,9 @@ struct amdgpu_display_manager {
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+	 * Data is stored as a byte array that should be cast to the appropriate bb struct
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
/**
* @oem_i2c:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 25e8befbcc47..7187d5aedf0a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -107,7 +107,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes*/
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
@@ -133,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b7c6e8d13435..eef51652ca35 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -92,9 +92,9 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
- return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
+ return amdgpu_lookup_format_info(pixel_format, modifier);
}
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 615d2ab2b803..ea2619b507db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier);
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 4686d4b0cbad..95f890fda8aa 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -726,6 +726,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state,
)
);
+TRACE_EVENT(amdgpu_dm_brightness,
+ TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac),
+ TP_ARGS(function, user_brightness, converted_brightness, aux, ac),
+ TP_STRUCT__entry(
+ __field(void *, function)
+ __field(u32, user_brightness)
+ __field(u32, converted_brightness)
+ __field(bool, aux)
+ __field(bool, ac)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->user_brightness = user_brightness;
+ __entry->converted_brightness = converted_brightness;
+ __entry->aux = aux;
+ __entry->ac = ac;
+ ),
+ TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s",
+ (void *)__entry->function,
+ (u32)__entry->user_brightness,
+ (u32)__entry->converted_brightness,
+ (__entry->aux) ? "true" : "false",
+ (__entry->ac) ? "AC" : "DC"
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
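
The new amdgpu_dm_brightness tracepoint is emitted behind a trace_amdgpu_dm_brightness_enabled() check (see the backlight hunk above), so argument gathering costs nothing when tracing is off. The same guard shape in plain C:

#include <stdio.h>

static int trace_brightness_enabled; /* flipped by the tracing backend */

static void trace_brightness(unsigned requested, unsigned converted,
			     int aux, int on_ac)
{
	printf("brightness requested=%u converted=%u aux=%s power=%s\n",
	       requested, converted, aux ? "true" : "false",
	       on_ac ? "AC" : "DC");
}

static void set_level(unsigned user, unsigned hw, int aux, int on_ac)
{
	if (trace_brightness_enabled)   /* skip argument setup when disabled */
		trace_brightness(user, hw, aux, on_ac);
}

int main(void)
{
	set_level(50, 12800, 0, 1);     /* no output: tracing disabled */
	trace_brightness_enabled = 1;
	set_level(75, 19200, 1, 0);
	return 0;
}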
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2c645dffec18..f2b1720a6a66 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7(
process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
}
+ dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction;
}
// Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d9955c5d2e5e..60021671b386 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
###############################################################################
# DCN30
###############################################################################
-CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c3e58c730b1..33b9d36619ff 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- if (!stream->dpms_off || (stream_status && stream_status->plane_count))
+ if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
index fa09c594fd36..06da34676965 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -56,6 +56,7 @@
#define DALSMC_MSG_SetDisplayRefreshFromMall 0xF
#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
#define DALSMC_MSG_BacoAudioD3PME 0x11
-#define DALSMC_Message_Count 0x12
+#define DALSMC_MSG_SmartAccess 0x12
+#define DALSMC_Message_Count 0x13
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 8083a553c60e..ef77fcd164ed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -30,6 +30,7 @@
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
+#include "dcn30/dcn30m_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_present
+ .is_smu_present = dcn3_is_smu_present,
+ .set_smartmux_switch = dcn30m_set_smartmux_switch
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
new file mode 100644
index 000000000000..8e8a11c7437e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr_internal.h"
+#include "dcn30/dcn30m_clk_mgr.h"
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set);
+}
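Callers reach this through the set_smartmux_switch hook installed in dcn3_funcs above; other ASICs leave the hook NULL. A hedged caller sketch (the wrapper name and pin encoding are assumptions for illustration):

    /* Hypothetical wrapper: only ASICs that wire up the hook can switch. */
    static uint32_t try_smartmux_switch(struct clk_mgr *clk_mgr, uint32_t pins)
    {
        if (clk_mgr->funcs->set_smartmux_switch)
            return clk_mgr->funcs->set_smartmux_switch(clk_mgr, pins);

        return 0; /* no SmartMux support on this ASIC */
    }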
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
new file mode 100644
index 000000000000..757985b2eadc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30M_CLK_MGR_H__
+#define __DCN30M_CLK_MGR_H__
+
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set);
+
+#endif //__DCN30M_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..0dd0583ff21e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "dalsmc.h"
+
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr,
+ unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ uint32_t result;
+ /* Wait for response register to be ready */
+ dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ if (IS_SMU_TIMEOUT(result))
+ dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
+
+ /* Check the response; read back the out-parameter on success */
+ if (result == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set);
+
+ dcn30m_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SmartAccess, pins_to_set, &response);
+
+ return response;
+}
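The mailbox sequence above is the usual three-register DAL-to-SMU handshake: poll RESP until the previous message drains, clear RESP, write ARG, write MSG, then poll RESP again; at 10 us per retry and 200000 retries, each message is bounded at roughly two seconds. The local REG() macro token-pastes straight to the raw offsets, which a compile-time check can illustrate (assertion added for illustration only):

    /* REG(DAL_RESP_REG) expands to mmDAL_RESP_REG, i.e. the raw offset. */
    _Static_assert(REG(DAL_RESP_REG) == 0x16274,
                   "REG() token-pasting resolves to the mailbox offset");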
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
new file mode 100644
index 000000000000..8a59a473fc5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+
+#include "core_types.h"
+
+struct clk_mgr_internal;
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set);
+#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 4b17d2fcd565..b59703467128 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -22,8 +22,6 @@
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
-#include "dml/dcn401/dcn401_fpu.h"
-
#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
@@ -183,43 +181,36 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr)
{
- /* legacy */
- DC_FP_START();
- dcn401_build_wm_range_table_fpu(clk_mgr);
- DC_FP_END();
-
- if (clk_mgr->ctx->dc->debug.using_dml21) {
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
-
- /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
- } else {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
- }
-
- /* Set 1B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
+ /* For min clocks use as reported by PM FW and report those as min */
+ uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
+ uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+
+ /* Set A - Normal - default values */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
+
+ /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
+ /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set 1A for dummy p-state */
+ if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
+ } else {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
}
+
+ /* Set 1B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
}
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
@@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->smu_present && clk_mgr->dpm_present &&
+ ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz));
+}
+
static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
@@ -1490,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn401_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
@@ -1505,6 +1544,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
.get_hard_min_memclk = dcn401_get_hard_min_memclk,
.get_hard_min_fclk = dcn401_get_hard_min_fclk,
+ .is_dc_mode_present = dcn401_is_dc_mode_present,
+ .get_max_clock_khz = dcn401_get_max_clock_khz,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
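dcn401_get_max_clock_khz returns the top DPM table entry scaled from MHz to kHz, or the boot snapshot when DPM is unavailable; DSCCLK has no table of its own and is derived as DISPCLK / 3. A hedged call-site sketch:

    /* Hypothetical: query the DSC clock ceiling through the funcs table. */
    unsigned int dsc_max_khz = 0;

    if (clk_mgr->funcs->get_max_clock_khz)
        dsc_max_khz = clk_mgr->funcs->get_max_clock_khz(clk_mgr,
                                                        CLK_TYPE_DSCCLK);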
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 6c9ae5ca2c7e..97a1ce1e8a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -105,10 +105,13 @@ struct dcn401_clk_mgr {
};
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base);
struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx,
struct dccg *dccg);
void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
+
#endif /* __DCN401_CLK_MGR_H_ */
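dcn401_is_dc_mode_present reports whether PM FW published any DC-mode clock limit alongside a populated DPM table. A sketch of the intended guard at a consumer (apply_dc_mode_limits() is an assumed name, not part of this patch):

    /* Hypothetical: only honor DC-mode caps when PM FW reported any. */
    if (clk_mgr->funcs->is_dc_mode_present &&
        clk_mgr->funcs->is_dc_mode_present(clk_mgr))
        apply_dc_mode_limits(clk_mgr->bw_params);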
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b34b5b52236d..c31f7f8e409f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -976,6 +976,8 @@ static bool dc_construct_ctx(struct dc *dc,
if (!dc_ctx)
return false;
+ dc_stream_init_rmcm_3dlut(dc);
+
dc_ctx->cgs_device = init_params->cgs_device;
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
@@ -2381,7 +2383,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
- res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING);
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
@@ -3304,7 +3306,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context,
+ DC_VALIDATE_MODE_ONLY) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3526,7 +3529,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -4632,7 +4635,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context,
+ DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -5155,7 +5159,7 @@ static bool update_planes_and_stream_v1(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return false;
@@ -5439,8 +5443,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
else
ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
-
- if (ret)
+ if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
return ret;
@@ -5471,7 +5474,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
stream_update, state);
- if (ret)
+ if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
}
@@ -5544,6 +5547,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
break;
+ case DC_ACPI_CM_POWER_STATE_D3:
+ if (dc->caps.ips_support)
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dc->caps.ips_v2_support) {
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+ }
+ break;
default:
ASSERT(dc->current_state->stream_count == 0);
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
@@ -6341,13 +6353,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
-/*
- *****************************************************************************
+/**
* dc_get_power_profile_for_dc_state() - extracts power profile from dc state
*
* Called when DM wants to make power policy decisions based on dc_state
*
- *****************************************************************************
+ * @context: Pointer to the dc_state from which the power profile is extracted.
+ *
+ * Return: The power profile structure containing the power level information.
*/
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
@@ -6363,13 +6376,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
return profile;
}
-/*
- **********************************************************************************
+/**
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
- * Called when DM wants to log detile buffer size from dc_state
+ * This function is called to log the detile buffer size from the dc_state.
*
- **********************************************************************************
+ * @context: a pointer to the dc_state from which the detile buffer size is extracted.
+ *
+ * Return: the size of the detile buffer, or 0 if not available.
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
@@ -6380,26 +6394,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+
/**
- ***********************************************************************************************
* dc_get_host_router_index: Get index of host router from a dpia link
*
* This function returns the host router index of the target link if the target link is a DPIA link.
*
- * @param [in] link: target link
- * @param [out] host_router_index: host router index of the target link
+ * @link: Pointer to the target link (input)
+ * @host_router_index: Pointer to store the host router index of the target link (output).
*
- * @return: true if the host router index is found and valid.
+ * Return: true if the host router index is found and valid.
*
- ***********************************************************************************************
*/
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
{
- struct dc *dc = link->ctx->dc;
+ struct dc *dc;
- if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
return false;
+ dc = link->ctx->dc;
+
if (link->link_index < dc->lowest_dpia_link_index)
return false;
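The bool-to-enum conversion in this file follows one rule: fast_validate == true becomes DC_VALIDATE_MODE_ONLY (a yes/no support check) and fast_validate == false becomes DC_VALIDATE_MODE_AND_PROGRAMMING (support check plus programming info). A hypothetical shim for callers still holding the old bool, purely for illustration:

    /* Hypothetical compatibility shim; not part of this patch. */
    static inline enum dc_validate_mode
    dc_validate_mode_from_fast(bool fast_validate)
    {
        return fast_validate ? DC_VALIDATE_MODE_ONLY :
                               DC_VALIDATE_MODE_AND_PROGRAMMING;
    }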
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 7551d0a3fe82..bbce751b485f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -268,6 +268,8 @@ char *dc_status_to_str(enum dc_status status)
return "Insufficient DP link bandwidth";
case DC_FAIL_HW_CURSOR_SUPPORT:
return "HW Cursor not supported";
+ case DC_FAIL_DP_TUNNEL_BW_VALIDATE:
+ return "Fail DP Tunnel BW validation";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 7014b8d000bb..ec4e80e5b6eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for Smart Mux */
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ const struct tg_color sm_ver_colors[5] = {
+ {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */
+ {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta */
+ };
+
+ if (dc->caps.is_apu) {
+ /* APU driving the eDP */
+ *color = sm_ver_colors[dc->config.smart_mux_version];
+ } else {
+ /* dGPU driving the eDP - red */
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
/* Visual Confirm color definition for VABC */
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
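The APU path in get_smartmux_visual_confirm_color indexes sm_ver_colors directly with dc->config.smart_mux_version, which assumes the version stays within the five defined entries. A defensive variant (not in the patch) would clamp first:

    /* Hypothetical clamp before the visual-confirm table lookup. */
    unsigned int v = dc->config.smart_mux_version;

    if (v >= ARRAY_SIZE(sm_ver_colors))
        v = 0; /* treat unknown versions as SMUX_MUXCONTROL_UNSUPPORTED */
    *color = sm_ver_colors[v];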
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 71e15da4bb69..130455f2802a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -515,7 +515,8 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+
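The export now returns a dc_status instead of the old bool, letting callers distinguish the tunneling failure from other rejections via DC_FAIL_DP_TUNNEL_BW_VALIDATE (added in the dc_debug.c hunk earlier). A hedged call-site sketch:

    /* Hypothetical call site: reject a state that oversubscribes a tunnel. */
    enum dc_status status =
        dc_link_validate_dp_tunneling_bandwidth(dc, new_ctx);

    if (status == DC_FAIL_DP_TUNNEL_BW_VALIDATE)
        return status; /* propagate the specific failure */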
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 3da25bd8b578..4d6181e7c612 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3940,7 +3940,9 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ stream->audio_info.mode_count &&
+ (stream->audio_info.flags.all ||
+ (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
@@ -4053,7 +4055,7 @@ static bool add_all_planes_for_stream(
* @set: An array of dc_validation_set with all the current streams reference
* @set_count: Total of streams
* @context: New context
- * @fast_validate: Enable or disable fast validation
+ * @validate_mode: whether to validate the mode only or also populate the programming info
*
* This function updates the potential new stream in the context object. It
* creates multiple lists for the add, remove, and unchanged streams. In
@@ -4068,7 +4070,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
@@ -4242,7 +4244,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
}
- res = dc_validate_global_state(dc, context, fast_validate);
+ res = dc_validate_global_state(dc, context, validate_mode);
/* calculate pixel rate divider after deciding pixel clock & odm combine */
if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) {
@@ -4299,7 +4301,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
*
* @dc: dc struct for this driver
* @new_ctx: state to be validated
- * @fast_validate: set to true if only yes/no to support matters
+ * @validate_mode: whether to validate the mode only or also populate the programming info
*
* Checks hardware resource availability and bandwidth requirement.
*
@@ -4309,7 +4311,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -4368,7 +4370,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode);
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 4db7383720fd..883054bb18e7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -194,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state)
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
struct dc_state *state;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-#endif
state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
@@ -211,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
- dml2_opt->use_clock_dc_limits = false;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) {
dc_state_release(state);
return NULL;
}
- dml2_opt->use_clock_dc_limits = true;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
@@ -434,6 +427,8 @@ enum dc_status dc_state_remove_stream(
return DC_ERROR_UNEXPECTED;
}
+ dc_stream_release_3dlut_for_stream(dc, stream);
+
dc_stream_release(state->streams[i]);
state->stream_count--;
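The removed code copied dc->dml2_options into a shared temporary and toggled use_clock_dc_limits between the two dml2_create() calls; the new code instead expects two pre-built option sets. Presumably the construct path now does something equivalent to the following (location assumed, not shown in this hunk):

    /* Sketch of the assumed one-time setup at dc construction. */
    memcpy(&dc->dml2_dc_power_options, &dc->dml2_options,
           sizeof(dc->dml2_options));
    dc->dml2_options.use_clock_dc_limits = false;
    dc->dml2_dc_power_options.use_clock_dc_limits = true;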
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b883fb24fa12..4d6bc9fd4faa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -857,6 +857,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
}
/*
+* dc_stream_get_3dlut_for_stream()
+* Requirements:
+* 1. If the stream already owns an RMCM instance, return it.
+* 2. If it doesn't and we don't need to allocate one, return NULL.
+* 3. If there's a free RMCM instance, assign it to the stream and return it.
+* 4. If there are no free RMCM instances, return NULL.
+*/
+
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ // see if one is allocated for this stream
+ for (int i = 0; i < num_rmcm; i++) {
+ if (dc->res_pool->rmcm_3dlut[i].isInUse &&
+ dc->res_pool->rmcm_3dlut[i].stream == stream)
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+
+ // case: none allocated for this stream, and we don't need to allocate one
+ if (!allocate_one)
+ return NULL;
+
+ // see if there is an unused 3D LUT we can allocate
+ for (int i = 0; i < num_rmcm; i++) {
+ if (!dc->res_pool->rmcm_3dlut[i].isInUse) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = true;
+ dc->res_pool->rmcm_3dlut[i].stream = stream;
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+ }
+
+ // no free 3D LUT available
+ return NULL;
+}
+
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct dc_rmcm_3dlut *rmcm_3dlut =
+ dc_stream_get_3dlut_for_stream(dc, stream, false);
+
+ if (rmcm_3dlut) {
+ rmcm_3dlut->isInUse = false;
+ rmcm_3dlut->stream = NULL;
+ rmcm_3dlut->protection_bits = 0;
+ }
+}
+
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ for (int i = 0; i < num_rmcm; i++) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = false;
+ dc->res_pool->rmcm_3dlut[i].stream = NULL;
+ dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
+ }
+}
+
+/*
* Finds the greatest index in refresh_rate_hz that contains a value <= refresh
*/
static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh)
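The three RMCM helpers above give 3D LUT instances a simple ownership lifecycle: look up or allocate on demand, release explicitly, and reset everything at construction (dc_state_remove_stream() also releases on stream teardown). A hedged usage sketch; the enable-path caller is not part of this hunk:

    /* Hypothetical: acquire an RMCM instance for a stream, then release. */
    static bool rmcm_lut_demo(struct dc *dc, struct dc_stream_state *stream)
    {
        struct dc_rmcm_3dlut *lut =
            dc_stream_get_3dlut_for_stream(dc, stream, true);

        if (!lut)
            return false; /* all num_rmcm_3dluts instances are in use */

        /* ...program the LUT and lut->protection_bits here... */

        dc_stream_release_3dlut_for_stream(dc, stream);
        return true;
    }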
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index f41073c0147e..59c07756130d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -46,6 +46,8 @@
#include "dmub/inc/dmub_cmd.h"
+#include "sspl/dc_spl_types.h"
+
struct abm_save_restore;
/* forward declaration */
@@ -53,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.334"
+#define DC_VER "3.2.340"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -66,8 +68,11 @@ struct dmub_notification;
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
+#define MAX_SUPPORTED_FORMATS 7
+
#define MAX_HOST_ROUTERS_NUM 3
-#define MAX_DPIA_PER_HOST_ROUTER 2
+#define MAX_DPIA_PER_HOST_ROUTER 3
+#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER)
/* Display Core Interfaces */
struct dc_versions {
@@ -193,6 +198,34 @@ struct dpp_color_caps {
struct rom_curve_caps ogam_rom_caps;
};
+/* The structure below describes HW support for the 3D LUT memory layout; the
+ supported range is extended to match what the OS can handle on the roadmap */
+struct lut3d_caps {
+ uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */
+ struct {
+ uint32_t swizzle_3d_rgb : 1;
+ uint32_t swizzle_3d_bgr : 1;
+ uint32_t linear_1d : 1;
+ } mem_layout_support;
+ struct {
+ uint32_t unorm_12msb : 1;
+ uint32_t unorm_12lsb : 1;
+ uint32_t float_fp1_5_10 : 1;
+ } mem_format_support;
+ struct {
+ uint32_t order_rgba : 1;
+ uint32_t order_bgra : 1;
+ } mem_pixel_order_support;
+ /*< size options are 9, 17, 33, 45, 65 */
+ struct {
+ uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */
+ uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */
+ uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */
+ uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */
+ uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */
+ } lut_dim_caps;
+};
+
/**
* struct mpc_color_caps - color pipeline capabilities for multiple pipe and
* plane combined blocks
@@ -204,14 +237,21 @@ struct dpp_color_caps {
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ * @mcm_3d_lut_caps: HW support cap for MCM LUT memory
+ * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory
+ * @preblend: whether color manager supports preblend with MPC
*/
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t num_3dluts : 3;
+ uint16_t num_rmcm_3dluts : 3;
uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
+ struct lut3d_caps mcm_3d_lut_caps;
+ struct lut3d_caps rmcm_3d_lut_caps;
+ bool preblend;
};
/**
@@ -271,6 +311,7 @@ struct dc_caps {
bool dmcub_support;
bool zstate_support;
bool ips_support;
+ bool ips_v2_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -308,6 +349,8 @@ struct dc_caps {
struct dc_scl_caps scl_caps;
uint8_t num_of_host_routers;
uint8_t num_of_dpias_per_host_router;
+ /* limit of the ODM only; could be further limited by other factors (like pipe count) */
+ uint8_t max_odm_combine_factor;
};
struct dc_bug_wa {
@@ -462,6 +505,7 @@ struct dc_config {
bool use_spl;
bool prefer_easf;
bool use_pipe_ctx_sync_logic;
+ int smart_mux_version;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
@@ -472,6 +516,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_rcg;
unsigned int disable_ips_in_vpb;
bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
@@ -484,6 +529,8 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ struct spl_sharpness_range dcn_sharpness_range;
+ struct spl_sharpness_range dcn_override_sharpness_range;
};
enum visual_confirm {
@@ -495,6 +542,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SMARTMUX_DGPU = 10,
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
@@ -773,6 +821,7 @@ enum pg_hw_resources {
PG_DCHVM,
PG_DWB,
PG_HPO,
+ PG_DCOH,
PG_HW_RESOURCES_NUM_ELEMENT
};
@@ -789,10 +838,8 @@ union dpia_debug_options {
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
- uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t enable_dpia_pre_training:1; /* bit 7 */
- uint32_t unify_link_enc_assignment:1; /* bit 8 */
- uint32_t reserved:24;
+ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -918,6 +965,9 @@ struct dc_debug_options {
bool disable_dsc_power_gate;
bool disable_optc_power_gate;
bool disable_hpo_power_gate;
+ bool disable_io_clk_power_gate;
+ bool disable_mem_power_gate;
+ bool disable_dio_power_gate;
int dsc_min_slice_height_override;
int dsc_bpp_increment_div;
bool disable_pplib_wm_range;
@@ -1154,7 +1204,7 @@ struct dc_init_data {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
struct dc_callback_init {
@@ -1255,6 +1305,12 @@ union dc_3dlut_state {
};
+struct dc_rmcm_3dlut {
+ bool isInUse;
+ const struct dc_stream_state *stream;
+ uint8_t protection_bits;
+};
+
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
@@ -1392,6 +1448,8 @@ struct dc_plane_state {
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
+ struct spl_sharpness_range sharpness_range;
+ enum sharpness_range_source sharpness_source;
};
struct dc_plane_info {
@@ -1573,6 +1631,7 @@ struct dc_scratch_space {
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
bool force_dp_ffe_preset;
+ bool skip_phy_ssc_reduction;
} wa_flags;
union dc_dp_ffe_preset forced_dp_ffe_preset;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -1582,6 +1641,8 @@ struct dc_scratch_space {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ bool is_dds;
+ bool is_display_mux_present;
bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
struct dc_panel_config panel_config;
@@ -1636,6 +1697,10 @@ struct dc {
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
+ /* For eDP to know the switching state of SmartMux */
+ bool is_switch_in_progress_orig;
+ bool is_switch_in_progress_dest;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -1666,7 +1731,7 @@ struct dc {
} scratch;
struct dml2_configuration_options dml2_options;
- struct dml2_configuration_options dml2_tmp;
+ struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
};
@@ -1771,19 +1836,15 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);
-/*
- * fast_validate: we return after determining if we can support the new state,
- * but before we populate the programming info
- */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
@@ -2379,17 +2440,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
/*
- * Validate the BW of all the valid DPIA links to make sure it doesn't exceed
- * available BW for each host router
- *
- * @dc: pointer to dc struct
- * @stream: pointer to all possible streams
- * @count: number of valid DPIA streams
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ * Return: dc_status
*/
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
- const unsigned int count);
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
/* Sink Interfaces - A sink corresponds to a display output device */
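With MAX_DPIA_PER_HOST_ROUTER raised to 3, the new MAX_DPIA_NUM works out to 3 * 3 = 9 tunnel endpoints; a compile-time restatement (illustration only):

    _Static_assert(MAX_DPIA_NUM == 9, "3 host routers x 3 DPIAs per router");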
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index afbcf866520e..f5ef1a07078e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1269,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
@@ -1286,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else {
/* RCG only */
new_signals.bits.allow_pg = 0;
@@ -1293,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips2 = 0;
new_signals.bits.allow_z10 = 0;
}
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ }
+ // Setting RCG allow bits (IPSv2.0)
+ if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ }
+ // IPS dynamic allow bits (IPSv2 change, vpb use case)
+ if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ new_signals.bits.allow_dynamic_ips1_z8 = 1;
}
-
ips_driver->signals = new_signals;
dc_dmub_srv->driver_signals = ips_driver->signals;
}
@@ -1318,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
- uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1338,31 +1364,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
rcg_exit_count = ips_fw->rcg_exit_count;
ips1_exit_count = ips_fw->ips1_exit_count;
ips2_exit_count = ips_fw->ips2_exit_count;
+ ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
ips_driver->signals.all = 0;
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit,
ips_fw->rcg_entry_count,
ips_fw->ips1_entry_count,
- ips_fw->ips2_entry_count);
+ ips_fw->ips2_entry_count,
+ ips_fw->ips1_z8ret_entry_count);
/* Note: register access has technically not resumed for DCN here, but we
* need to be message PMFW through our standard register interface.
*/
dc_dmub_srv->needs_idle_wake = false;
- if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
+ if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
- ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1422,28 +1451,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
- ips2_exit_count);
+ ips2_exit_count,
+ ips1z8_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
@@ -1656,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
return result;
}
-void dc_dmub_srv_fams2_update_config(struct dc *dc,
+static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
struct dc_state *context,
bool enable)
{
@@ -1722,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
+ union dmub_rb_cmd cmd;
+ uint32_t i;
+
+ memset(config, 0, sizeof(*config));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
+
+ cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
+ cmd.ib_fams2_config.ib_data.size = sizeof(*config);
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
+ /* copy static feature configuration overrides */
+ config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
+ config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+
+ /* send global configuration parameters */
+ memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
+ sizeof(struct dmub_cmd_fams2_global_config));
+
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
+ /* copy stream static base state */
+ memcpy(&config->stream_v1[i].base,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(config->stream_v1[i].base));
+
+ /* copy stream static sub-state */
+ memcpy(&config->stream_v1[i].sub_state,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
+ sizeof(config->stream_v1[i].sub_state));
+ }
+ }
+
+ config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ config->global.features.bits.enable = enable;
+
+ dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ if (dc->debug.fams_version.major == 2)
+ dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
+ if (dc->debug.fams_version.major == 3)
+ dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
+}
+
void dc_dmub_srv_fams2_drr_update(struct dc *dc,
uint32_t tg_inst,
uint32_t vtotal_min,
@@ -1847,83 +1936,267 @@ void dc_dmub_srv_fams2_passthrough_flip(
}
}
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
{
- bool result;
+ union dmub_rb_cmd cmd;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
+ cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
+ cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);
+
+ // only panel_inst=0 is supported at the moment
+ cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
+ cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+}
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t bytes = sizeof(struct dmub_ips_residency_info);
+
+ dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
+ cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
+ cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);
+
+ cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.ips_query_residency_info.info_data.size = bytes;
+ cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
+ cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
+ cmd.ips_query_residency_info.header.ret_status == 0)
return false;
- result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
- start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+ // copy the result to the output since ret_status != 0 means the command returned data
+ memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
+
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
+ lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Init failed in DMUB");
return result;
}
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+bool dmub_lsdma_send_linear_copy_packet(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count)
{
- uint32_t i;
- enum dmub_gpint_command command_code;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return;
+ memset(&cmd, 0, sizeof(cmd));
- switch (output->ips_mode) {
- case DMUB_IPS_MODE_IPS1_MAX:
- command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS2:
- command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_RCG:
- command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_ONO2_ON:
- command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
- break;
- default:
- command_code = DMUB_GPINT__INVALID_COMMAND;
- break;
- }
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects the byte count minus 1
+ lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_x = params.src_x;
+ lsdma_data->u.tiled_copy_data.src_y = params.src_y;
+ lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
+ lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1
+ lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.rect_x = params.rect_x;
+ lsdma_data->u.tiled_copy_data.rect_y = params.rect_y;
+ lsdma_data->u.tiled_copy_data.dcc = params.dcc;
+ lsdma_data->u.tiled_copy_data.tmz = params.tmz;
+ lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
+ lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects (height - 1)
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects (height - 1)
+ lsdma_data->u.tiled_copy_data.data_format = params.data_format;
+ lsdma_data->u.tiled_copy_data.max_com = params.max_com;
+ lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB");
+
+ return result;
+}
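A hypothetical call site for the helper above, assuming a full-frame 3840x2160 copy. All values are placeholders, and the element_size encoding is an assumption on my part; the helper forwards it to hardware unmodified.

struct lsdma_send_tiled_to_tiled_copy_command_params params = {
	.src_addr     = src_gpu_addr,   /* placeholder addresses */
	.dst_addr     = dst_gpu_addr,
	.src_width    = 3840, .dst_width  = 3840, /* helper applies the -1 bias */
	.src_height   = 2160, .dst_height = 2160,
	.rect_x       = 3840, .rect_y     = 2160,
	.element_size = 2,              /* assumed log2(bytes per element) */
};

dmub_lsdma_send_tiled_to_tiled_copy_command(dc_dmub_srv, params);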
+
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
+ lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
+ lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.data = data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Constfill failed in DMUB");
+
+ return result;
+}
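For example, zero-filling a hypothetical 4 KiB scratch buffer; data is a 32-bit pattern that the engine presumably replicates across byte_count bytes of the destination.

dmub_lsdma_send_pio_constfill_command(dc_dmub_srv,
				      scratch_gpu_addr, /* placeholder address */
				      4096,             /* byte_count */
				      0x00000000);      /* 32-bit fill pattern */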
+
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
- if (command_code == DMUB_GPINT__INVALID_COMMAND)
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.reg_write_data.reg_addr = reg_addr;
+ lsdma_data->u.reg_write_data.reg_data = reg_data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Poll Reg failed in DMUB");
+
+ return result;
+}
+
+void dc_dmub_srv_release_hw(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd = {0};
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- for (i = 0; i < GPINT_RETRY_NUM; i++) {
- // false could mean GPINT timeout, in which case we should retry
- if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode), &output->residency_percent,
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- break;
- udelay(100);
- }
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
+ cmd.idle_opt_notify_idle.header.payload_bytes =
+ sizeof(cmd.idle_opt_notify_idle) -
+ sizeof(cmd.idle_opt_notify_idle.header);
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
- (uint16_t)(output->ips_mode),
- &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->entry_counter = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[1] = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[1] = 0;
-
- // NUM_IPS_HISTOGRAM_BUCKETS = 16
- for (i = 0; i < 16; i++)
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->histogram[i] = 0;
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
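Note the contrast with the LSDMA helpers above: those submit with DM_DMUB_WAIT_TYPE_NO_WAIT, while this release notification uses DM_DMUB_WAIT_TYPE_WAIT so DMUB has processed the command before the driver cedes HW access. A minimal teardown-path sketch (the function name is illustrative, not from this patch):

/* Sketch only; dm_display_teardown() is a hypothetical caller. */
static void dm_display_teardown(struct dc *dc)
{
	/* ... quiesce any in-flight programming sequences first ... */
	dc_dmub_srv_release_hw(dc); /* blocks until DMUB acks the command */
}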
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index ada5c2fb2db3..8ea320f21269 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -210,6 +210,60 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_surface_update *srf_updates,
int surface_count);
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dmub_lsdma_send_linear_copy_packet(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count);
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable);
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data);
+
+struct lsdma_send_tiled_to_tiled_copy_command_params {
+ uint64_t src_addr;
+ uint64_t dst_addr;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t dst_width : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_height : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t data_format : 6;
+ uint32_t swizzle_mode : 5;
+ uint32_t element_size : 3;
+ uint32_t dcc : 1;
+ uint32_t tmz : 1;
+ uint32_t read_compress : 2;
+ uint32_t write_compress : 2;
+ uint32_t max_com : 2;
+ uint32_t max_uncom : 1;
+ uint32_t padding : 9;
+};
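The bitfields above pack into six 32-bit words after the two 64-bit addresses, so one might pin the layout with a compile-time check. This is an assumption about compiler packing, not something this patch asserts.

_Static_assert(sizeof(struct lsdma_send_tiled_to_tiled_copy_command_params) == 40,
	       "expected 16 bytes of addresses plus six 32-bit bitfield words");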
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params);
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data);
+
/**
* struct ips_residency_info - struct containing info from dmub_ips_residency_stats
*
@@ -223,7 +277,7 @@ void dc_dmub_srv_fams2_passthrough_flip(
* @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
*/
struct ips_residency_info {
- enum dmub_ips_mode ips_mode;
+ enum ips_residency_mode ips_mode;
unsigned int residency_percent;
unsigned int entry_counter;
unsigned int total_active_time_us[2];
@@ -231,21 +285,16 @@ struct ips_residency_info {
unsigned int histogram[16];
};
-/**
- * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
- *
- * @dc_dmub_srv: The DC DMUB service pointer
- * @start_measurement: Describes whether to start or stop measurement
- *
- * Return: true if GPINT was sent successfully, false otherwise
- */
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement);
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst,
+ struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode);
/**
- * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @output: Output struct to copy the the residency info to
+ * @dc: pointer to DC object
*/
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
+void dc_dmub_srv_release_hw(const struct dc *dc);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index d346f8ae1634..5ce1be362534 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -162,6 +162,11 @@ struct dc_link_settings {
struct dc_tunnel_settings {
bool should_enable_dp_tunneling;
bool should_use_dp_bw_allocation;
+ uint8_t cm_id;
+ uint8_t group_id;
+ uint32_t bw_granularity;
+ uint32_t estimated_bw;
+ uint32_t allocated_bw;
};
union dc_dp_ffe_preset {
@@ -957,11 +962,21 @@ union usb4_driver_bw_cap {
uint8_t raw;
};
+/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */
+union dpia_tunnel_info {
+ struct {
+ uint8_t group_id :3;
+ uint8_t rsvd :5;
+ } bits;
+ uint8_t raw;
+};
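Decoding the raw DPCD byte then reduces to a union read; the helper below is a hypothetical illustration, not part of this patch.

static uint8_t get_tunnel_group_id(uint8_t dpcd_byte) /* illustrative */
{
	union dpia_tunnel_info info = { .raw = dpcd_byte };

	return info.bits.group_id; /* bits [2:0] of DPCD 0xE0021 */
}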
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
union usb4_driver_bw_cap driver_bw_cap;
+ union dpia_tunnel_info dpia_tunnel_info;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c9f6c6275ca1..667852517246 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -68,7 +68,7 @@ enum dc_plane_addr_type {
struct dc_plane_address {
enum dc_plane_addr_type type;
- bool tmz_surface;
+ uint8_t tmz_surface;
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
@@ -1104,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select {
enum mpcc_gamut_remap_id {
MPCC_OGAM_GAMUT_REMAP,
MPCC_MCM_FIRST_GAMUT_REMAP,
- MPCC_MCM_SECOND_GAMUT_REMAP
+ MPCC_MCM_SECOND_GAMUT_REMAP,
+ MPCC_RMCM_GAMUT_REMAP,
};
enum cursor_matrix_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index e3a8283b4098..7f57661433eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->adaptive_sharpness.enable = true;
spl_in->adaptive_sharpness.sharpness_level = 0;
} else if (sharpness_setting == SHARPNESS_CUSTOM) {
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+ /* SAT: read sharpness_range from dc_plane_state */
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid;
if (force_sharpness_level > 0) {
if (force_sharpness_level > 10)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 341d2ffb64b1..5fc6fea211de 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -579,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
bool dc_stream_program_csc_matrix(struct dc *dc,
struct dc_stream_state *stream);
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one);
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream);
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc);
+
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
void dc_dmub_update_dirty_rect(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index a4cd0eb39a3a..375ca2f13b7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -175,6 +175,7 @@ struct dc_panel_patch {
unsigned int embedded_tiled_slave;
unsigned int disable_fams;
unsigned int skip_avmute;
+ unsigned int skip_audio_sab_check;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
@@ -263,6 +264,7 @@ enum dc_timing_source {
TIMING_SOURCE_EDID_4BYTE,
TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
TIMING_SOURCE_EDID_CEA_RID,
+ TIMING_SOURCE_EDID_DISPLAYID_TYPE5,
TIMING_SOURCE_VBIOS,
TIMING_SOURCE_CV,
TIMING_SOURCE_TV,
@@ -1255,7 +1257,6 @@ enum dc_cm2_gpu_mem_layout {
enum dc_cm2_gpu_mem_pixel_component_order {
DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
- DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA
};
enum dc_cm2_gpu_mem_format {
@@ -1277,7 +1278,6 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_333333,
DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
@@ -1315,6 +1315,7 @@ struct dc_cm2_func_luts {
bool mpc_3dlut_enable;
bool rmcm_3dlut_enable;
bool mpc_mcm_post_blend;
+ uint8_t rmcm_tmz;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
@@ -1372,4 +1373,19 @@ struct set_backlight_level_params {
uint8_t aux_inst;
};
+enum dc_validate_mode {
+ /* validate the mode and program HW */
+ DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
+ /* only validate the mode */
+ DC_VALIDATE_MODE_ONLY = 1,
+ /* validate the mode and get the max state (voltage level) */
+ DC_VALIDATE_MODE_AND_STATE_INDEX = 2,
+};
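The rest of the series mechanically rewires `bool fast_validate` call sites onto this enum: `true` becomes DC_VALIDATE_MODE_ONLY, `false` becomes DC_VALIDATE_MODE_AND_PROGRAMMING, and converted sites test `validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING` where they previously tested `fast_validate`. A hypothetical transition shim capturing that mapping:

static inline enum dc_validate_mode to_validate_mode(bool fast_validate)
{
	return fast_validate ? DC_VALIDATE_MODE_ONLY
			     : DC_VALIDATE_MODE_AND_PROGRAMMING;
}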
+
+struct dc_validation_dpia_set {
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *tunnel_settings;
+ uint32_t required_bw;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index ffd172231fdf..668ee2d405fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg)
}
}
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index 55e8718aad22..5947a35363aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h);
void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst);
void dccg401_set_src_sel(
@@ -230,7 +230,6 @@ void dccg401_set_dp_dto(
const struct dp_dto_params *params);
void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index d28826c3ae5f..4e06468a6284 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -292,9 +292,35 @@ static void set_speed(
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
+static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ switch (arbitrate) {
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW:
+ return true;
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW:
+ return false;
+ case DC_I2C_STATUS__DC_I2C_STATUS_IDLE:
+ default:
+ break;
+ }
+
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return false;
+
+ return true;
+}
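acquire_engine() returns false when the arbitration register reports the engine held by HW (DMCU), and setup_engine() simply fails in that case. A caller wanting to wait out a busy engine could poll, as in this illustrative sketch; the patch itself does not retry.

/* Hypothetical retry loop around acquire_engine(). */
static bool acquire_engine_with_retry(struct dce_i2c_hw *dce_i2c_hw, int tries)
{
	while (tries--) {
		if (acquire_engine(dce_i2c_hw))
			return true;
		udelay(10); /* arbitrary backoff, illustrative only */
	}
	return false;
}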
+
static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
uint32_t reset_length = 0;
+
+ // Deassert soft reset to unblock I2C engine registers
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false);
@@ -309,8 +335,8 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 1,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
+ if (!acquire_engine(dce_i2c_hw))
+ return false;
/*set SW requested I2c speed to default, if API calls in it will be override later*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
@@ -319,9 +345,8 @@ static bool setup_engine(
i2c_setup_limit = dce_i2c_hw->setup_limit;
/* Program pin select */
- REG_UPDATE_6(DC_I2C_CONTROL,
+ REG_UPDATE_5(DC_I2C_CONTROL,
DC_I2C_GO, 0,
- DC_I2C_SOFT_RESET, 0,
DC_I2C_SEND_RESET, 0,
DC_I2C_SW_STATUS_RESET, 1,
DC_I2C_TRANSACTION_COUNT, 0,
@@ -351,6 +376,26 @@ static bool setup_engine(
return true;
}
+/*
+ * If we boot without an HDMI display, the I2C engine does not get initialized
+ * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
+ * acquire, so that after setting SW_DONE_USING_I2C on release, the engine gets
+ * immediately reacquired by SW, preventing DMUB from using it.
+ */
+static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return;
+
+ // Still acquired after release, release again as a workaround
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW);
+}
+
static void release_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
@@ -378,9 +423,9 @@ static void release_engine(
/*for HW HDCP Ri polling failure w/a test*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp);
- /* Release I2C after reset, so HW or DMCU could use it */
- REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
- DC_I2C_SW_USE_I2C_REG_REQ, 0);
+ // Release I2C engine so it can be used by HW or DMCU, automatically clears SW_USE_I2C
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ cntl_stuck_hw_workaround(dce_i2c_hw);
if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) {
if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index fcd3d86ad517..e7a318e26d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -4,6 +4,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
+#include "dc_dp_types.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dmub_replay.h"
@@ -43,21 +44,45 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
/*
* Enable/Disable Replay.
*/
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
+ struct dc_link *link)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
uint32_t retry_count;
enum replay_state state = REPLAY_STATE_0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ uint8_t i;
memset(&cmd, 0, sizeof(cmd));
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
cmd.replay_enable.data.panel_inst = panel_inst;
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
- if (enable)
+ if (enable) {
cmd.replay_enable.data.enable = REPLAY_ENABLE;
- else
+ // hpo stream/link encoder assignments are not static, need to update every time we try to enable replay
+ if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ // TODO: refactor for multi-eDP support
+ break;
+ }
+ }
+
+ if (!pipe_ctx)
+ return;
+
+ cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+ }
+ } else
cmd.replay_enable.data.enable = REPLAY_DISABLE;
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
@@ -149,6 +174,17 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->digbe_inst = replay_context->digbe_inst;
copy_settings_data->digfe_inst = replay_context->digfe_inst;
+ if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc)
+ copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ else
+ copy_settings_data->hpo_stream_enc_inst = 0;
+ if (pipe_ctx->link_res.hpo_dp_link_enc)
+ copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+ else
+ copy_settings_data->hpo_link_enc_inst = 0;
+ }
+
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -211,6 +247,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
+ pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..ccbe385e132c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
uint8_t panel_inst);
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
- uint8_t panel_inst);
+ uint8_t panel_inst, struct dc_link *link);
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index e1d500633dfa..b357683b4255 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags)
-
ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
@@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
DML += dcn351/dcn351_fpu.o
-DML += dcn401/dcn401_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index f1235bf9a596..74962791302f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
/*
* we want a breakdown of the various stages of validation, which the
@@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth(
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
+ if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
@@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth(
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
- } else if (fast_validate) {
+ } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index e9fea9c2162e..2a2eaf6adf26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1315,7 +1315,7 @@ static void swizzle_to_dml_params(
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
@@ -1733,7 +1733,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
@@ -1780,10 +1780,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2027,7 +2027,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2040,7 +2040,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_COUNT();
- out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2050,12 +2050,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -2077,7 +2077,7 @@ validate_out:
}
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
@@ -2095,12 +2095,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
- }
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes);
// Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
@@ -2113,7 +2112,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
@@ -2156,14 +2155,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
@@ -2239,7 +2238,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
@@ -2281,10 +2280,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2319,7 +2318,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
}
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2337,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2347,12 +2346,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index b6c34198ddc8..aed00039ca62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_calculate_wm(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc,
@@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc,
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
@@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
-bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
- fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode);
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index 88789987bdbc..e5f5c0663750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg(
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
@@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index d2ae43a82ba5..dfcc5d50071e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate(
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 5ed117e11aa2..df9d50b9b57c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
dc_assert_fp_enabled();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
index d32c5bb99f4c..362ac79184ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -35,6 +35,6 @@
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b0fc1fd20208..6160952245b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
@@ -1479,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
@@ -1517,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
// Populate dppclk to trigger a recalculate in dml_get_voltage_level
// so the phantom pipe DLG params can be assigned correctly.
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
@@ -1560,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc_state_remove_phantom_streams_and_planes(dc, context);
dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
@@ -2138,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
bool repopulate_pipes = false;
@@ -2162,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
for (i = 0; i < context->stream_count; i++)
resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -2172,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
&pipe_cnt, &repopulate_pipes))
goto validate_fail;
}
- if (fast_validate ||
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
(vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
@@ -2195,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_none;
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.validate_max_state = false;
@@ -2247,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int flag_vlevel = vlevel;
int i;
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!dc->config.enable_windowed_mpo_odm)
dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
@@ -2343,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2389,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
if (vlevel_temp < vlevel) {
vlevel = vlevel_temp;
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
@@ -2410,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
stream_status->fpo_in_use = false;
}
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 276e90e4e0ce..273d2bd79d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 92f0a099d089..5d73efa2f0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
const unsigned int max_allowed_vblank_nom = 1023;
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
index 067480fc3691..d121c5afce71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
@@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 17d0b4923b0c..6f516af82956 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
const unsigned int max_allowed_vblank_nom = 1023;
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
index f93efab9a668..f71d9d8d0759 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
deleted file mode 100644
index 4fbecb5ff349..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dcn401_fpu.h"
-#include "dcn401/dcn401_resource.h"
-// We need this includes for WATERMARKS_* defines
-#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h"
-#include "link.h"
-
-#define DC_LOGGER_INIT(logger)
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
-{
- /* defaults */
- double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us;
- double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us;
- double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us;
- double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- uint16_t setb_min_uclk_mhz = min_uclk_mhz;
- uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
-
- dc_assert_fp_enabled();
-
- /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
- if (dcfclk_mhz_for_the_second_state)
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
- else
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz)
- setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
- clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
- clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
- clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
- clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
- }
- /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
- /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
-}
-
-/*
- * dcn401_update_bw_bounding_box
- *
- * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from
- * spreadsheet with actual values as per dGPU SKU:
- * - with passed few options from dc->config
- * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
- * need to get it from PM FW)
- * - with passed latency values (passed in ns units) in dc-> bb override for
- * debugging purposes
- * - with passed latencies from VBIOS (in 100_ns units) if available for
- * certain dGPU SKU
- * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
- * of the same ASIC)
- * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
- * FW for different clocks (which might differ for certain dGPU SKU of the
- * same ASIC)
- */
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
-{
- dc_assert_fp_enabled();
-
- /* Override from passed dc->bb_overrides if available*/
- if (dc->bb_overrides.sr_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.urgent_latency_ns)
- dc->dml2_options.bbox_overrides.urgent_latency_us =
- dc->bb_overrides.urgent_latency_ns / 1000.0;
-
- if (dc->bb_overrides.dram_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (dc->bb_overrides.fclk_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
-
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
-
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dc->dml2_options.bbox_overrides.dram_num_chan =
- dc->ctx->dc_bios->vram_info.num_chans;
-
- }
-
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
- dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
- dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
-
- if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
- unsigned int i = 0;
-
- dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- }
- }
- }
-}
-
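The removed helpers above reduce to one recurring idiom: a zero ns value in dc->bb_overrides means "not set", and anything else is converted from nanoseconds to microseconds. A minimal standalone sketch of that idiom, with hypothetical names (apply_latency_override_us is not a real DC helper):

#include <stdio.h>

/* Zero means "no override requested", matching the bb_overrides checks above. */
static void apply_latency_override_us(double *target_us, long override_ns)
{
	if (override_ns)
		*target_us = override_ns / 1000.0; /* ns -> us */
}

int main(void)
{
	double sr_exit_us = 16.5;                      /* spreadsheet default */

	apply_latency_override_us(&sr_exit_us, 0);     /* unset: keep default */
	apply_latency_override_us(&sr_exit_us, 21000); /* 21000 ns -> 21.0 us */
	printf("%.1f us\n", sr_exit_us);
	return 0;
}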
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
deleted file mode 100644
index 329f1788843c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DCN401_FPU_H__
-#define __DCN401_FPU_H__
-
-#include "clk_mgr.h"
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr);
-
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 157ecf008d6c..4c21ce42054c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
@@ -94,17 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-
-
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
@@ -120,6 +120,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 7ae9c0ba0c9e..715f9019a33e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para
result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx,
in_out_params->mode_lib,
in_out_params->in_display_cfg,
- 0,
+ in_out_params->in_start_state_idx,
in_out_params->mode_lib->states.num_states - 1);
if (result)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index 0670e4dc4fd9..dbeb08466092 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -1917,6 +1917,7 @@ struct display_mode_lib_st {
struct dml_mode_support_ex_params_st {
struct display_mode_lib_st *mode_lib;
const struct dml_display_cfg_st *in_display_cfg;
+ dml_uint_t in_start_state_idx;
dml_uint_t out_lowest_state_idx;
struct dml_mode_support_info_st *out_evaluation_info;
};
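The new in_start_state_idx lets callers set a floor on the power-state search instead of always starting at state 0, as the hunk above wires into mode_support_pwr_states. A standalone toy showing just that contract, all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in: state i "supports" the mode when its clock is high enough. */
static bool state_supports(const int state_clk_mhz[], int i, int required_mhz)
{
	return state_clk_mhz[i] >= required_mhz;
}

static int find_lowest_state(const int state_clk_mhz[], int num_states,
			     int start_state_idx, int required_mhz)
{
	for (int i = start_state_idx; i < num_states; i++)
		if (state_supports(state_clk_mhz, i, required_mhz))
			return i; /* lowest supported state at or above the floor */
	return -1;                /* no state supports the mode */
}

int main(void)
{
	int clks[] = { 300, 600, 900, 1200 };

	/* Starting at index 0 vs. a caller-provided floor of 2. */
	printf("%d\n", find_lowest_state(clks, 4, 0, 500)); /* prints 1 */
	printf("%d\n", find_lowest_state(clks, 4, 2, 500)); /* prints 2 */
	return 0;
}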
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 2aa6d44bb359..a06217a9eef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml21_wrapper.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_internal_shared_types.h"
@@ -11,277 +10,263 @@
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
-static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct dml2_soc_bb *soc_bb;
- const struct dml2_soc_qos_parameters *qos_params;
-
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- default:
- if (config->bb_from_dmub)
- soc_bb = config->bb_from_dmub;
- else
- soc_bb = &dml2_socbb_dcn401;
-
- qos_params = &dml_dcn4_variant_a_soc_qos_params;
- }
-
- /* patch soc bb */
- memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb));
-
- /* patch qos params */
- memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters));
-}
-
-static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config)
-{
- memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb));
-}
-
-static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
+ const struct dc *in_dc,
const struct dml2_configuration_options *config)
{
- memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities));
+ bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
+
+ /* ODM options */
+ pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
+ pmo_options->disable_dyn_odm_for_multi_stream = true;
+ pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+
+ pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
+
+ /* NOTE: DRR and SubVP Require FAMS2 */
+ pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
+ in_dc->debug.force_disable_subvp ||
+ disable_fams2;
+ pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
+ disable_fams2;
+ pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
+ disable_fams2;
+ pmo_options->disable_fams2 = disable_fams2;
+
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
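dml21_disable_pstate_method_mask is decoded one bit per P-state method via (mask >> n) & 1. A standalone sketch of that decoding; the enum and helper are hypothetical, with the bit positions read off the assignments above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions as used in dml21_populate_pmo_options above:
 * bit 1 = vblank, bit 2 = svp, bit 3 = drr_clamped, bit 4 = drr_var.
 * (Bit 0 is not consumed in that function.) */
enum pstate_method_bit {
	PSTATE_BIT_VBLANK      = 1,
	PSTATE_BIT_SVP         = 2,
	PSTATE_BIT_DRR_CLAMPED = 3,
	PSTATE_BIT_DRR_VAR     = 4,
};

static bool pstate_method_disabled(uint32_t mask, enum pstate_method_bit bit)
{
	return (mask >> bit) & 1;
}

int main(void)
{
	uint32_t mask = 1u << PSTATE_BIT_SVP; /* disable SVP only */

	printf("svp disabled: %d\n", pstate_method_disabled(mask, PSTATE_BIT_SVP));
	printf("vblank disabled: %d\n", pstate_method_disabled(mask, PSTATE_BIT_VBLANK));
	return 0;
}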
-static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+/*
+ * Populate dml_init based on default static values in soc bb. The default
+ * values are for reference and support at least minimal operation of the
+ * current SoC and DCN hardware. The values may be modified by subsequent
+ * override functions to reflect the true hardware capability.
+ */
+static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- const struct dml2_ip_capabilities *ip_caps;
-
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
+ dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
+ dml_init->soc_bb = dml2_socbb_dcn401;
+ dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
+ dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ break;
default:
- ip_caps = &dml2_dcn401_max_ip_caps;
+ memset(dml_init, 0, sizeof(*dml_init));
+ DC_ERR("unsupported dcn version for DML21!");
+ return;
}
-
- memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities));
}
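The default branch zeroes the whole out-parameter before returning, so a caller that ignores the error never consumes stale stack contents. A minimal standalone illustration of that defensive pattern, with hypothetical types:

#include <stdio.h>
#include <string.h>

struct init_params { int project_id; double clk_mhz; };

enum dcn_version { DCN_4_01, DCN_UNKNOWN };

static void populate_defaults(struct init_params *out, enum dcn_version ver)
{
	switch (ver) {
	case DCN_4_01:
		out->project_id = 401;
		out->clk_mhz = 1000.0;
		break;
	default:
		/* Zero everything so callers never see stale values. */
		memset(out, 0, sizeof(*out));
		fprintf(stderr, "unsupported dcn version\n");
		return;
	}
}

int main(void)
{
	struct init_params p;

	populate_defaults(&p, DCN_UNKNOWN);
	printf("project=%d clk=%.1f\n", p.project_id, p.clk_mhz); /* zeros */
	return 0;
}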
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init,
+static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_socbb_params(dml_init, config, in_dc);
- else
- dml21_external_socbb_params(dml_init, config);
+ dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
+ dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
}
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+/*
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * The clock values in the base soc bb are dummy placeholders. The real clock
+ * values are retrieved from SMU firmware into dc's clock table at runtime.
+ * This function overrides the dummy placeholders with the real values from
+ * dc's clock table.
+ */
+static void override_dml_init_with_values_from_smu(
+ struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_ip_params(dml_init, config, in_dc);
- else
- dml21_external_ip_params(dml_init, config);
-}
-
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config, const struct dc *in_dc)
-{
int i;
-
const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table;
-
- /* override clocks if smu is present */
- if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
+ struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
+
+ if (!in_dc->clk_mgr->funcs->is_smu_present ||
+ !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
+ /* skip if smu is not present */
+ return;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
}
} else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
}
}
+ }
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
} else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
}
} else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
}
}
+ }
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
} else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
}
} else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
}
} else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
}
} else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
}
}
+ }
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
} else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
}
} else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
}
}
+ }
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
} else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
}
} else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
}
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
}
}
-
- /* do not override phyclks for now */
- /* phyclk */
- // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000;
- // }
-
- /* phyclk_d18 */
- // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000;
- // }
-
- /* phyclk_d32 */
- // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000;
- // }
}
+}
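Each of the per-clock blocks above repeats the same algorithm: copy MHz levels into kHz and, when DC-mode limits apply, pin the first level that crosses the limit to the limit itself and truncate the table there. A standalone sketch of that algorithm factored into one helper; the real code inlines the logic per clock type, and the helper name and array shapes here are hypothetical:

#include <stdio.h>

#define MAX_LEVELS 8

/* Copy src_mhz[0..num-1] into dst_khz, clamping at dc_limit_mhz (0 = no limit).
 * Returns the resulting number of valid levels; unused slots are zeroed. */
static int copy_and_clamp_levels(long dst_khz[], const int src_mhz[],
				 int num, int dc_limit_mhz)
{
	int out = num;

	for (int i = 0; i < MAX_LEVELS; i++) {
		if (i >= out) {
			dst_khz[i] = 0; /* zero unused slots */
			continue;
		}
		if (dc_limit_mhz && src_mhz[i] > dc_limit_mhz) {
			if (i == 0 || src_mhz[i - 1] < dc_limit_mhz) {
				/* First level over the limit: pin it to the limit. */
				dst_khz[i] = (long)dc_limit_mhz * 1000;
				out = i + 1;
			} else {
				/* Previous level already hit the limit: truncate. */
				dst_khz[i] = 0;
				out = i;
			}
		} else {
			dst_khz[i] = (long)src_mhz[i] * 1000;
		}
	}
	return out;
}

int main(void)
{
	int mhz[] = { 400, 800, 1200, 1600 };
	long khz[MAX_LEVELS];
	int n = copy_and_clamp_levels(khz, mhz, 4, 1000);

	printf("%d levels, top = %ld kHz\n", n, khz[n - 1]); /* 3 levels, 1000000 kHz */
	return 0;
}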
- dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
- dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+static void override_dml_init_with_values_from_vbios(
+ struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
+ struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
+ struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
- /* override bounding box paramters from VBIOS */
if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
(in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
@@ -308,32 +293,120 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
- /* override bounding box paramters from DC config */
- if (in_dc->bb_overrides.sr_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+}
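The (latency_100ns + 9) / 10 expressions above are round-up integer division from 100 ns units to whole microseconds. A two-line standalone check of that identity:

#include <stdio.h>

int main(void)
{
	/* 23 * 100ns = 2.3us; rounding up yields 3us. */
	unsigned int latency_100ns = 23;
	unsigned int us = (latency_100ns + 9) / 10;

	printf("%u us\n", us); /* prints: 3 us */
	return 0;
}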
+
+
+static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ /*
+	 * TODO - There seem to be overlaps between the values overridden from
+	 * DMUB and VBIOS. Investigate and identify the values that DMUB needs
+ * to own.
+ */
+// const struct dmub_soc_bb_params *dmub_bb_params =
+// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
+
+// if (dmub_bb_params == NULL)
+// return;
+
+// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
+// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
+// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
+// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
+// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
+// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
+// if (dmub_bb_params->fclk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
+// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
+// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
+// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
+// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->stutter_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
+// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
+// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
+// if (dmub_bb_params->z8_min_idle_time_ns > 0)
+// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
+// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
+// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
+// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
+// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
+// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
+// #else
+// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
+// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
+// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
+// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
+// #endif
+// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
+// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
+// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
+// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
+// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
+// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
+// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
+}
+
+static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ if (!config->use_native_soc_bb_construction) {
+ dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
+ dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ if (in_dc->bb_overrides.sr_exit_time_ns)
+ dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
+ in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
- if (in_dc->bb_overrides.dram_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ if (in_dc->bb_overrides.dram_clock_change_latency_ns)
+ dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.fclk_change_blackout_us =
+ if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
+ dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
- }
+}
- //TODO
- // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) {
- // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- // }
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
+ const struct dml2_configuration_options *config,
+ const struct dc *in_dc)
+{
+ populate_default_dml_init_params(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_smu(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
+
+ override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
}
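The function applies its override sources in strictly increasing precedence: defaults, then hardware, SMU, VBIOS, DMUB, and finally software policy, so a later source wins wherever two sources disagree. A minimal standalone sketch of that layering with hypothetical field and function names:

#include <stdio.h>

struct init_params { double dram_latency_us; };

/* Each layer touches only the fields it actually knows about,
 * so applying the layers in order gives later sources precedence. */
static void from_defaults(struct init_params *p)
{
	p->dram_latency_us = 400.0; /* static placeholder */
}

static void from_vbios(struct init_params *p, unsigned int latency_100ns)
{
	if (latency_100ns > 0)
		p->dram_latency_us = (latency_100ns + 9) / 10; /* round up to us */
}

static void from_sw_policy(struct init_params *p, unsigned int override_ns)
{
	if (override_ns)
		p->dram_latency_us = override_ns / 1000.0;
}

int main(void)
{
	struct init_params p;

	from_defaults(&p);      /* 400.0 us placeholder */
	from_vbios(&p, 3000);   /* VBIOS reports 300 us -> overrides default */
	from_sw_policy(&p, 0);  /* no debug override -> VBIOS value stands */
	printf("%.1f us\n", p.dram_latency_us); /* prints: 300.0 us */
	return 0;
}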
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
@@ -726,7 +799,6 @@ static void populate_dml21_surface_config_from_plane_state(
switch (plane_state->tiling_info.gfxversion) {
case DcGfxVersion7:
case DcGfxVersion8:
- // Placeholder for programming the array_mode
break;
case DcGfxVersion9:
case DcGfxVersion10:
@@ -889,10 +961,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
- case DC_CM2_GPU_MEM_SIZE_333333:
- plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
- break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ default:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 73a013be1e48..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -17,9 +17,7 @@ struct dml2_context;
struct dml2_configuration_options;
struct dml2_initialize_instance_in_out;
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 930e86cdb88a..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* reset fams2 data */
memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc,
memcpy(static_base_state,
&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
sizeof(union dmub_cmd_fams2_config));
- memcpy(static_sub_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
- sizeof(union dmub_cmd_fams2_config));
+
+ if (dc->debug.fams_version.major == 3) {
+ memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams],
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2,
+ sizeof(union dmub_fams2_stream_static_sub_state_v2));
+ } else {
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+ }
switch (dc->debug.fams_version.minor) {
case 1:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 208d3651b6ba..03de3cf06ae5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -2,8 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include <linux/vmalloc.h>
-
#include "dml2_internal_types.h"
#include "dml_top.h"
#include "dml2_core_dcn4_calcs.h"
@@ -37,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
return true;
}
-static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_populate_configuration_options(const struct dc *in_dc,
+ struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config)
{
- bool disable_fams2;
- struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options;
-
- /* ODM options */
- pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
- pmo_options->disable_dyn_odm_for_multi_stream = true;
- pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+ dml_ctx->config = *config;
/* UCLK P-State options */
if (in_dc->debug.dml21_force_pstate_method) {
@@ -55,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
} else {
dml_ctx->config.pmo.force_pstate_method_enable = false;
}
-
- pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
-
- /* NOTE: DRR and SubVP Require FAMS2 */
- disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
- pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
- in_dc->debug.force_disable_subvp ||
- disable_fams2;
- pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
- disable_fams2;
- pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
- disable_fams2;
- pmo_options->disable_fams2 = disable_fams2;
-
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
- in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
- pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- break;
- default:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid;
- }
- (*dml_ctx)->architecture = dml2_architecture_21;
+ dml_ctx->architecture = dml2_architecture_21;
- /* Store configuration options */
- (*dml_ctx)->config = *config;
+ dml21_populate_configuration_options(in_dc, dml_ctx, config);
DC_FP_START();
- /*Initialize SOCBB and DCNIP params */
- dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc);
-
- /* apply debug overrides */
- dml21_apply_debug_options(in_dc, *dml_ctx, config);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
- /*Initialize DML21 instance */
- dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+ dml2_initialize_instance(&dml_ctx->v21.dml_init);
DC_FP_END();
}
@@ -111,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
if (!dml21_allocate_memory(dml_ctx))
return false;
- dml21_init(in_dc, dml_ctx, config);
+ dml21_init(in_dc, *dml_ctx, config);
return true;
}
@@ -328,12 +290,13 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
return true;
}
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate)
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
+	/* Use dml21_check_mode_support for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
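With the boolean fast_validate replaced by a three-valued enum, the dispatch collapses to a single comparison: anything short of full programming takes the lightweight support check. A standalone sketch of that dispatch; the enumerator ordering and the two stub callees are assumptions:

#include <stdbool.h>
#include <stdio.h>

enum dc_validate_mode {
	DC_VALIDATE_MODE_ONLY,
	DC_VALIDATE_MODE_AND_STATE_INDEX,
	DC_VALIDATE_MODE_AND_PROGRAMMING,
};

/* Stubs standing in for dml21_check_mode_support / dml21_mode_check_and_programming. */
static bool check_mode_support(void) { return true; }
static bool mode_check_and_programming(void) { return true; }

static bool validate(enum dc_validate_mode mode)
{
	/* Only full programming runs the heavyweight path. */
	if (mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
		return check_mode_support();
	return mode_check_and_programming();
}

int main(void)
{
	printf("%d\n", validate(DC_VALIDATE_MODE_ONLY));
	return 0;
}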
@@ -496,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
return true;
}
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
dml21_init(in_dc, dml_ctx, config);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
index 42e715024bc9..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
@@ -14,6 +14,7 @@ struct dc;
struct dc_state;
struct dml2_configuration_options;
struct dml2_context;
+enum dc_validate_mode;
/**
* dml2_create - Creates dml21_context.
@@ -33,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
struct dml2_context *src_dml_ctx);
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
struct dml2_context *src_dml_ctx);
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
/**
* dml21_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
+ * will not populate context.res_ctx.
*
- * Based on fast_validate option internally would call:
+ * Based on the validate_mode option, internally this would call:
*
- * -dml21_mode_check_and_programming - for non fast_validate option
+ * -dml21_mode_check_and_programming - for the DC_VALIDATE_MODE_AND_PROGRAMMING option
* Calculates if dc_state can be supported on the input display
* configuration. If supported, generates the necessary HW
* programming for the new dc_state.
*
- * -dml21_check_mode_support - for fast_validate option
+ * -dml21_check_mode_support - for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX options
* Calculates if dc_state can be supported for the input display
* config.
@@ -56,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
* separate dc_states for validation.
* Return: True if mode is supported, false otherwise.
*/
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate);
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode);
/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index c047d56527c4..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -43,5 +43,4 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
*/
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 84c90050668c..b05030926ce8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -46,6 +46,7 @@ struct dml2_display_dlg_regs {
uint32_t dst_y_delta_drq_limit;
uint32_t refcyc_per_vm_dmdata;
uint32_t dmdata_dl_delta;
+ uint32_t dst_y_svp_drq_limit;
// MRQ
uint32_t refcyc_per_meta_chunk_vblank_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index 255f05de362c..e8dc6471c0be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -222,6 +222,7 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool upsp_enabled;
struct {
double h_ratio;
double v_ratio;
@@ -426,6 +427,7 @@ struct dml2_stream_parameters {
struct dml2_display_cfg {
bool gpuvm_enable;
+ bool ffbm_enable;
bool hostvm_enable;
// Allocate DET proportionally between streams based on pixel rate
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 5f0bc42d1d2f..8c9f414aa6bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -93,12 +93,15 @@ struct dml2_soc_power_management_parameters {
double dram_clk_change_write_only_us;
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
+ double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
+ double type_b_dram_clk_change_blackout_us;
+ double type_b_ppt_blackout_us;
};
struct dml2_clk_table {
@@ -130,6 +133,7 @@ struct dml2_soc_state_table {
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
+ unsigned long dcfclk_khz;
};
struct dml2_soc_bb {
@@ -138,6 +142,7 @@ struct dml2_soc_bb {
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
+ double lower_bound_bandwidth_dchub;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 0dbf886d8926..98c0234e2f47 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate {
dml2_output_rate_hdmi_rate_6x4 = 9,
dml2_output_rate_hdmi_rate_8x4 = 10,
dml2_output_rate_hdmi_rate_10x4 = 11,
- dml2_output_rate_hdmi_rate_12x4 = 12
+ dml2_output_rate_hdmi_rate_12x4 = 12,
+ dml2_output_rate_hdmi_rate_16x4 = 13,
+ dml2_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_pmo_options {
@@ -279,7 +281,10 @@ struct dml2_per_stream_programming {
} phantom_stream;
union dmub_cmd_fams2_config fams2_base_params;
- union dmub_cmd_fams2_config fams2_sub_params;
+ union {
+ union dmub_cmd_fams2_config fams2_sub_params;
+ union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2;
+ };
};
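This anonymous union pairs with the fams_version.major check in dml21_build_fams2_programming above: exactly one member is live per stream, chosen by version. A standalone sketch of that version-gated selection with abridged, hypothetical types:

#include <string.h>

struct sub_v1 { int legacy_field; };
struct sub_v2 { int new_field; };

struct stream_programming {
	union {
		struct sub_v1 v1;
		struct sub_v2 v2; /* only one member is live at a time */
	} sub;
};

static void copy_sub_params(struct stream_programming *dst,
			    const struct stream_programming *src,
			    int fams_major)
{
	if (fams_major == 3)
		memcpy(&dst->sub.v2, &src->sub.v2, sizeof(dst->sub.v2));
	else
		memcpy(&dst->sub.v1, &src->sub.v1, sizeof(dst->sub.v1));
}

int main(void)
{
	struct stream_programming src = { .sub.v2.new_field = 7 }, dst = { 0 };

	copy_sub_params(&dst, &src, 3); /* FAMS major 3 -> copy the v2 member */
	return dst.sub.v2.new_field == 7 ? 0 : 1;
}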
//-----------------
@@ -674,9 +679,14 @@ struct dml2_display_cfg_programming {
// unlimited # of mcache
struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES];
+ bool failed_prefetch;
+ bool failed_uclk_pstate;
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_mode_programming_dcfclk;
+ bool failed_mode_programming_prefetch;
+ bool failed_mode_programming_flip;
bool failed_map_watermarks;
} informative;
};
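
The finer-grained informative flags added above make it possible to tell which stage rejected a configuration. A short triage sketch, assuming a populated struct dml2_display_cfg_programming named pgm; the log strings are illustrative:

    if (pgm->informative.failed_mode_programming) {
        if (pgm->informative.failed_mode_programming_dcfclk)
            DML_LOG_WARN("mode programming failed on DCFCLK\n");
        else if (pgm->informative.failed_mode_programming_prefetch)
            DML_LOG_WARN("mode programming failed on prefetch\n");
        else if (pgm->informative.failed_mode_programming_flip)
            DML_LOG_WARN("mode programming failed on immediate flip\n");
    } else if (pgm->informative.failed_prefetch) {
        DML_LOG_WARN("prefetch schedule could not be supported\n");
    } else if (pgm->informative.failed_uclk_pstate) {
        DML_LOG_WARN("UCLK p-state support failed\n");
    }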
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 5b62cd19d979..b9cff2198511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -4861,7 +4861,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
- double PrefetchBandwidthOto[],
+ double PrefetchBandwidthMax[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4925,9 +4925,9 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
- l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
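
The rename from PrefetchBandwidthOto to PrefetchBandwidthMax reflects that the extra term is no longer just the OTO-schedule bandwidth but a general upper bound on prefetch bandwidth. A minimal sketch of the per-surface peak selection, using the DML math helpers math_max2()/math_max5() already used in this file; the scalar parameters are simplified stand-ins for the per-plane arrays:

    static double surface_peak_bw(double vm_row_bw, double flip_and_active_bw,
                                  double flip_and_prefetch_bw,
                                  double active_and_excess_bw,
                                  double flip_and_prefetch_bw_max,
                                  double prev_peak)
    {
        /* the surface requirement is the worst case of all competing demands */
        double required = math_max5(vm_row_bw, flip_and_active_bw,
                                    flip_and_prefetch_bw,
                                    active_and_excess_bw,
                                    flip_and_prefetch_bw_max);

        /* keep the running peak across repeated evaluations */
        return math_max2(required, prev_peak);
    }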
@@ -5125,7 +5125,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
- *p->RequiredPrefetchBWOTO = 0.0;
+ *p->RequiredPrefetchBWMax = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5356,7 +5356,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
* mp will fail if ms decides to use equ schedule and mp decides to use oto schedule
* and the required bandwidth increases when going from ms to mp
*/
- *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
+ *p->RequiredPrefetchBWMax = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
@@ -5718,8 +5718,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+
+ /* equ bw should be propagated so that a ceiling of the equ bw is accounted for prior to mode programming.
+ * Overall bandwidth may be lower when going from mode support to mode programming, but the final pixel data
+ * bandwidth may end up higher than what was calculated in mode support.
+ */
+ *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
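
In effect, RequiredPrefetchBWMax now carries the ceiling of both candidate prefetch bandwidths, so mode programming can never require more than what mode support accounted for. A sketch of the intent, where used_equ_schedule is a hypothetical flag standing in for the branch above:

    double required_prefetch_bw_max = s->prefetch_bw_oto;  /* set on the oto path */

    if (used_equ_schedule)  /* hypothetical: true when the equ schedule is selected */
        required_prefetch_bw_max = math_max2(s->prefetch_bw_equ,
                                             required_prefetch_bw_max);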
@@ -6115,7 +6121,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6152,7 +6158,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6189,7 +6195,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6226,7 +6232,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6263,7 +6269,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -7490,7 +7496,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
@@ -7635,7 +7641,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -7802,7 +7808,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -7908,6 +7914,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
@@ -11256,7 +11263,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11399,7 +11406,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11539,7 +11546,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw;
calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw;
calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw;
calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma;
calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma;
@@ -11883,7 +11890,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
//Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
+ mode_lib->mp.TotalWRBandwidth = 0;
for (k = 0; k < display_cfg->num_streams; ++k) {
s->WRBandwidth = 0;
if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
@@ -11892,7 +11899,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
(display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
/ ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
* (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
+ mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth;
}
}
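
Moving TotalWRBandwidth from the scratch locals into mode_lib->mp keeps the accumulated writeback bandwidth alive until dml2_core_calcs_get_informative() reads it (see the hunk below), instead of leaving it in transient scratch storage. A sketch of the accumulation and export, with compute_wr_bandwidth() as a hypothetical helper standing in for the per-stream expression above:

    mode_lib->mp.TotalWRBandwidth = 0;
    for (k = 0; k < display_cfg->num_streams; ++k)
        if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0)
            mode_lib->mp.TotalWRBandwidth += compute_wr_bandwidth(display_cfg, k); /* hypothetical */

    /* later, in the informative step: */
    out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0;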
@@ -13062,6 +13069,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4;
out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
@@ -13246,7 +13257,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
- out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index bdee6ad7bc59..28687565ac22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe {
double DCFClkDeepSleep;
unsigned int DPPPerSurface;
bool ScalerEnabled;
+ bool UPSPEnabled;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate {
dml2_core_internal_output_rate_hdmi_rate_6x4 = 9,
dml2_core_internal_output_rate_hdmi_rate_8x4 = 10,
dml2_core_internal_output_rate_hdmi_rate_10x4 = 11,
- dml2_core_internal_output_rate_hdmi_rate_12x4 = 12
+ dml2_core_internal_output_rate_hdmi_rate_12x4 = 12,
+ dml2_core_internal_output_rate_hdmi_rate_16x4 = 13,
+ dml2_core_internal_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_core_internal_watermarks {
@@ -260,12 +263,14 @@ struct dml2_core_internal_mode_support_info {
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
bool EnoughUrgentLatencyHidingSupport;
+ bool PrefetchScheduleSupported;
bool PrefetchSupported;
bool PrefetchBandwidthSupported;
bool DynamicMetadataSupported;
bool VRatioInPrefetchSupported;
bool DISPCLK_DPPCLK_Support;
bool TotalAvailablePipesSupport;
+ bool ODMSupport;
bool ModeSupport;
bool ViewportSizeSupport;
@@ -314,9 +319,7 @@ struct dml2_core_internal_mode_support_info {
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor
double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
double max_urgent_latency_us;
double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
@@ -329,6 +332,8 @@ struct dml2_core_internal_mode_support_info {
bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
+ bool dcfclk_support;
+ bool qos_bandwidth_support;
};
struct dml2_core_internal_mode_support {
@@ -350,9 +355,11 @@ struct dml2_core_internal_mode_support {
double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes
+ double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes
double uclk_freq_mhz;
double dram_bw_mbps;
double max_dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state
double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
@@ -394,9 +401,13 @@ struct dml2_core_internal_mode_support {
double TWait[DML2_MAX_PLANES];
bool UnboundedRequestEnabled;
+ unsigned int compbuf_reserved_space_64b;
+ bool hw_debug5;
unsigned int CompressedBufferSizeInkByte;
double VRatioPreY[DML2_MAX_PLANES];
double VRatioPreC[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int swath_width_luma_ub[DML2_MAX_PLANES];
unsigned int swath_width_chroma_ub[DML2_MAX_PLANES];
unsigned int RequiredSlots[DML2_MAX_PLANES];
@@ -417,8 +428,8 @@ struct dml2_core_internal_mode_support {
double dst_y_prefetch[DML2_MAX_PLANES];
double LinesForVM[DML2_MAX_PLANES];
double LinesForDPTERow[DML2_MAX_PLANES];
- double SwathWidthYSingleDPP[DML2_MAX_PLANES];
- double SwathWidthCSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
double BytePerPixelInDETY[DML2_MAX_PLANES];
@@ -469,13 +480,58 @@ struct dml2_core_internal_mode_support {
double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe
double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES];
+ bool is_using_mall_for_ss[DML2_MAX_PLANES];
+ unsigned int meta_row_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES];
+ bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
+ unsigned int meta_req_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES];
+ unsigned int meta_req_width[DML2_MAX_PLANES];
+ unsigned int meta_row_width[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeY[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeC[DML2_MAX_PLANES];
+ unsigned int meta_req_height[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_req_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES];
+ unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
+ unsigned int vm_group_bytes[DML2_MAX_PLANES];
+ unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
+ double TSetup[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
+ unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos.
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
+ double MaxActiveFCLKChangeLatencySupported;
+
// Backend
bool RequiresDSC[DML2_MAX_PLANES];
bool RequiresFEC[DML2_MAX_PLANES];
double OutputBpp[DML2_MAX_PLANES];
+ double DesiredOutputBpp[DML2_MAX_PLANES];
+ double PixelClockBackEnd[DML2_MAX_PLANES];
unsigned int DSCDelay[DML2_MAX_PLANES];
enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES];
enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES];
+ bool TotalAvailablePipesSupportNoDSC;
+ bool TotalAvailablePipesSupportDSC;
+ unsigned int NumberOfDPPNoDSC;
+ unsigned int NumberOfDPPDSC;
+ enum dml2_odm_mode ODMModeNoDSC;
+ enum dml2_odm_mode ODMModeDSC;
+ double RequiredDISPCLKPerSurfaceNoDSC;
+ double RequiredDISPCLKPerSurfaceDSC;
+ unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES];
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
@@ -484,8 +540,14 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
- /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */
- double RequiredPrefetchBWOTO[DML2_MAX_PLANES];
+ /* The max bandwidth calculated from the prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches:
+ * 1. oto bw should also be considered when calculating peak urgent bw to avoid situations where oto/equ mismatches occur between ms and mp
+ *
+ * 2. equ bandwidth needs to be considered when calculating peak urgent bw when the equ schedule is used in mode support.
+ * Slight differences in variables may cause the pixel data bandwidth to be higher
+ * even though the overall equ prefetch bandwidth can be lower going from ms to mp
+ */
+ double RequiredPrefetchBWMax[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
@@ -538,7 +600,44 @@ struct dml2_core_internal_mode_support {
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
+ unsigned int meta_row_height_luma[DML2_MAX_PLANES];
+ unsigned int meta_row_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+
+ unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+
+ unsigned int MaximumVStartup[DML2_MAX_PLANES];
+
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
+ unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
+
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
};
/// @brief A mega structure that houses various info for model programming step.
@@ -548,6 +647,7 @@ struct dml2_core_internal_mode_program {
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
//double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
@@ -599,6 +699,8 @@ struct dml2_core_internal_mode_program {
unsigned int MacroTileHeightC[DML2_MAX_PLANES];
unsigned int MacroTileWidthY[DML2_MAX_PLANES];
unsigned int MacroTileWidthC[DML2_MAX_PLANES];
+ double MaximumSwathWidthLuma[DML2_MAX_PLANES];
+ double MaximumSwathWidthChroma[DML2_MAX_PLANES];
bool surf_linear128_l[DML2_MAX_PLANES];
bool surf_linear128_c[DML2_MAX_PLANES];
@@ -631,6 +733,14 @@ struct dml2_core_internal_mode_program {
double UrgentBurstFactorChroma[DML2_MAX_PLANES];
double UrgentBurstFactorChromaPre[DML2_MAX_PLANES];
+ double MaximumSwathWidthInLineBufferLuma;
+ double MaximumSwathWidthInLineBufferChroma;
+
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
double meta_row_bw[DML2_MAX_PLANES];
unsigned int meta_row_bytes[DML2_MAX_PLANES];
unsigned int meta_req_width[DML2_MAX_PLANES];
@@ -652,7 +762,9 @@ struct dml2_core_internal_mode_program {
unsigned int PTERequestSizeC[DML2_MAX_PLANES];
double TWait[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
double Tdmdl_vm[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
double Tdmdl[DML2_MAX_PLANES];
double TSetup[DML2_MAX_PLANES];
unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
@@ -684,6 +796,38 @@ struct dml2_core_internal_mode_program {
double TCalc;
unsigned int TotImmediateFlipBytes;
+ unsigned int MaxTotalDETInKByte;
+ unsigned int NomDETInKByte;
+ unsigned int MinCompressedBufferSizeInKByte;
+ double PixelClockBackEnd[DML2_MAX_PLANES];
+ double OutputBpp[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+ unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ bool immediate_flip_required; // true if any pipe needs an immediate flip
+ double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
+ double TotalWRBandwidth;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+
// -------------------
// Output
// -------------------
@@ -694,9 +838,12 @@ struct dml2_core_internal_mode_program {
// Support
bool UrgVactiveBandwidthSupport;
+ bool PrefetchScheduleSupported;
+ bool UrgentBandwidthSupport;
bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported
bool ImmediateFlipSupported;
bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES];
+ bool dcfclk_support;
// Clock
double Dcfclk;
@@ -788,7 +935,7 @@ struct dml2_core_internal_mode_program {
// RQ registers
bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
-
+ double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES];
bool is_using_mall_for_ss[DML2_MAX_PLANES];
@@ -1001,10 +1148,10 @@ struct dml2_core_calcs_mode_programming_locals {
double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
- unsigned int dummy_integer_array[2][DML2_MAX_PLANES];
+ unsigned int dummy_integer_array[4][DML2_MAX_PLANES];
enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES];
double dummy_single_array[2][DML2_MAX_PLANES];
- unsigned int dummy_long_array[4][DML2_MAX_PLANES];
+ unsigned int dummy_long_array[8][DML2_MAX_PLANES];
bool dummy_boolean_array[2][DML2_MAX_PLANES];
bool dummy_boolean[2];
double dummy_single[2];
@@ -1028,7 +1175,6 @@ struct dml2_core_calcs_mode_programming_locals {
double dlg_vblank_start;
double LSetup;
double blank_lines_remaining;
- double TotalWRBandwidth;
double WRBandwidth;
struct dml2_core_internal_DmlPipe myPipe;
double PixelClockBackEndFactor;
@@ -1153,6 +1299,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1389,7 +1536,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
- double flip_and_prefetch_bw_oto;
+ double flip_and_prefetch_bw_max;
double active_and_excess_bw;
};
@@ -1418,6 +1565,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals {
struct dml2_core_shared_rq_dlg_get_dlg_reg_locals {
unsigned int plane_idx;
+ unsigned int stream_idx;
enum dml2_source_format_class source_format;
const struct dml2_timing_cfg *timing;
bool dual_plane;
@@ -1625,6 +1773,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
double *BytePerPixDETC;
unsigned int *DPPPerSurface;
bool mrq_present;
+ unsigned int dummy[2][DML2_MAX_PLANES];
+ unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES];
+ unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES];
// output
unsigned int *req_per_swath_ub_l;
@@ -1642,6 +1793,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
unsigned int *DETBufferSizeC;
unsigned int *full_swath_bytes_l;
unsigned int *full_swath_bytes_c;
+ unsigned int *full_swath_bytes_single_dpp_l;
+ unsigned int *full_swath_bytes_single_dpp_c;
bool *UnboundedRequestEnabled;
unsigned int *compbuf_reserved_space_64b;
unsigned int *CompressedBufferSizeInkByte;
@@ -1801,7 +1954,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
- double *RequiredPrefetchBWOTO;
+ double *RequiredPrefetchBWMax;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -2038,7 +2191,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
- double *prefetch_bandwidth_oto;
+ double *prefetch_bandwidth_max;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 7a220c0141c2..5f301befed16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+ return sw_mode == dml2_sw_linear;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index f486b090bbfc..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
- if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
-
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index b226225103c3..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -10,15 +10,74 @@
#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-/* ASSERT with message output */
-#define DML_ASSERT_MSG(condition, fmt, ...) \
- do { \
- if (!(condition)) { \
- DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \
- DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
- DML_ASSERT(condition); \
- } \
- } while (0)
+/* private helper macros */
+#define _BOOL_FORMAT(field) "%s", field ? "true" : "false"
+#define _UINT_FORMAT(field) "%u", field
+#define _INT_FORMAT(field) "%d", field
+#define _DOUBLE_FORMAT(field) "%lf", field
+#define _ELEMENT_FUNC "function"
+#define _ELEMENT_COMP_IF "component_interface"
+#define _ELEMENT_TOP_IF "top_interface"
+#define _LOG_ENTRY(element) do { \
+ DML_LOG_INTERNAL("<"element" name=\""); \
+ DML_LOG_INTERNAL(__func__); \
+ DML_LOG_INTERNAL("\">\n"); \
+} while (0)
+#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n")
+#define _LOG_SCALAR(field, format) do { \
+ DML_LOG_INTERNAL(#field" = "format(field)); \
+ DML_LOG_INTERNAL("\n"); \
+} while (0)
+#define _LOG_ARRAY(field, size, format) do { \
+ DML_LOG_INTERNAL(#field " = ["); \
+ for (int _i = 0; _i < (int) size; _i++) { \
+ DML_LOG_INTERNAL(format(field[_i])); \
+ if (_i + 1 == (int) size) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_2D_ARRAY(field, size0, size1, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j])); \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL("["); \
+ for (int _k = 0; _k < (int) size2; _k++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j][_k])); \
+ if (_k + 1 == (int) size2) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
/* fatal errors for unrecoverable DML states until a full reset */
#define DML_LOG_LEVEL_FATAL 0
@@ -28,7 +87,7 @@
#define DML_LOG_LEVEL_WARN 2
/* high level tracing of DML interfaces */
#define DML_LOG_LEVEL_INFO 3
-/* detailed tracing of DML internal components */
+/* tracing of DML internal execution */
#define DML_LOG_LEVEL_DEBUG 4
/* detailed tracing of DML calculation procedure */
#define DML_LOG_LEVEL_VERBOSE 5
@@ -37,30 +96,94 @@
#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
#endif /* #ifndef DML_LOG_LEVEL */
+/* public macros for DML_LOG_LEVEL_FATAL and up */
#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_ERROR and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
#else
#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_WARN and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_WARN(fmt, ...) ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_INFO and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
+#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF)
+#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF)
#else
#define DML_LOG_INFO(fmt, ...) ((void)0)
+#define DML_LOG_TOP_IF_ENTER() ((void)0)
+#define DML_LOG_TOP_IF_EXIT() ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_DEBUG and up */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
-#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__)
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF)
+#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF)
+#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC)
+#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC)
+#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT)
+#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT)
+#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT)
#else
#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#define DML_LOG_COMP_IF_ENTER() ((void)0)
+#define DML_LOG_COMP_IF_EXIT() ((void)0)
+#define DML_LOG_FUNC_ENTER() ((void)0)
+#define DML_LOG_FUNC_EXIT() ((void)0)
+#define DML_LOG_DEBUG_BOOL(field) ((void)0)
+#define DML_LOG_DEBUG_UINT(field) ((void)0)
+#define DML_LOG_DEBUG_INT(field) ((void)0)
+#define DML_LOG_DEBUG_DOUBLE(field) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0)
#endif
+
+/* public macros for DML_LOG_LEVEL_VERBOSE */
#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
-#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__)
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
-#endif
+#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */
#endif /* __DML2_DEBUG_H__ */
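
A usage sketch for the new structured logging macros, assuming a translation unit built with DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG (at lower levels every macro below compiles to ((void)0)). The dumped fields are real members of struct dml2_core_internal_mode_support from the hunks above, but the function itself is illustrative:

    static void dump_mode_support(const struct dml2_core_internal_mode_support *ms,
                                  unsigned int num_planes)
    {
        DML_LOG_FUNC_ENTER();                      /* emits <function name="dump_mode_support"> */
        DML_LOG_DEBUG_DOUBLE(ms->DCFCLK);          /* emits ms->DCFCLK = <value> */
        DML_LOG_DEBUG_BOOL(ms->UnboundedRequestEnabled);
        DML_LOG_DEBUG_ARRAY_UINT(ms->DSCDelay, num_planes);
        DML_LOG_FUNC_EXIT();                       /* emits </function> */
    }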
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index 00688b9f1df4..d52aa82283b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -202,6 +202,8 @@ struct dml2_core_mode_support_result {
} active;
unsigned int dispclk_khz;
+ unsigned int dpprefclk_khz;
+ unsigned int dtbrefclk_khz;
unsigned int dcfclk_deepsleep_khz;
unsigned int socclk_khz;
@@ -446,13 +448,17 @@ struct dml2_core_internal_state_intermediates {
};
struct dml2_core_mode_support_locals {
- struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ union {
+ struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
- struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ union {
+ struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 6b3b8803e0ae..a56e75cdf712 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -868,7 +868,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 208630754c8a..3b866e876bf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -1189,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
return location;
}
-static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg)
-{
- int i;
-
- if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- ASSERT(state->stream_count == 1);
- dml_dispcfg->timing.DRRDisplay[0] = true;
- } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) {
-
- for (i = 0; i < dml_dispcfg->num_timings; i++) {
- if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id)
- dml_dispcfg->timing.DRRDisplay[i] = true;
- }
- }
-}
-
static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state)
{
unsigned int i;
@@ -1437,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
}
}
-
- if (!dml2->config.use_native_pstate_optimization)
- apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg);
}
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 525b7d04bf84..0318260370ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/vmalloc.h>
-
#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2,
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
const struct dml_display_cfg_st *display_cfg,
- struct dml_mode_support_info_st *evaluation_info)
+ struct dml_mode_support_info_st *evaluation_info,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
s->mode_support_params.in_display_cfg = display_cfg;
+ if (validate_mode == DC_VALIDATE_MODE_ONLY)
+ s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
+ else
+ s->mode_support_params.in_start_state_idx = 0;
s->mode_support_params.out_evaluation_info = evaluation_info;
memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
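
The start-state selection above appears intended to make mode-only validation fail fast: when only support needs to be decided, the search begins at the highest (last) state, while full validation-and-programming still scans from state 0 to find the lowest supporting state. A minimal sketch of that selection, assuming states are ordered from lowest to highest capability as indexed here:

    static unsigned int pick_start_state_idx(const struct dml2_context *dml2,
                                             enum dc_validate_mode validate_mode)
    {
        if (validate_mode == DC_VALIDATE_MODE_ONLY)
            return dml2->v20.dml_core_ctx.states.num_states - 1;
        return 0;   /* full programming searches from the lowest state */
    }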
@@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
int unused_dpps = p->ip_params->max_num_dpp;
- int i, j;
- int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
- int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
- float frame_time_sec, max_frame_time_sec;
+ int i;
+ int odms_needed;
int largest_blend_and_timing = 0;
bool optimization_done = false;
@@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
if (p->new_display_config != p->cur_display_config)
*p->new_display_config = *p->cur_display_config;
- // Optimize P-State Support
- if (dml2->config.use_native_pstate_optimization) {
- if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
- // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp
- subvp_timing_to_add = -1;
- subvp_surface_to_add = -1;
- max_frame_time_sec = 0;
- surface_count = 0;
- for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
- refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
- (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
- if (refresh_rate_hz < 120) {
- // Check its upstream surfaces to see if this one could be converted to subvp.
- dpps_needed = 0;
- for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
- if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
- p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
- dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
- subvp_surface_to_add = j;
- surface_count++;
- }
- }
-
- if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
- frame_time_sec = (float)1 / refresh_rate_hz;
- if (frame_time_sec > max_frame_time_sec) {
- max_frame_time_sec = frame_time_sec;
- subvp_timing_to_add = i;
- }
- }
- }
- }
- if (subvp_timing_to_add >= 0) {
- new_timing_index = p->new_display_config->num_timings++;
- new_surface_index = p->new_display_config->num_surfaces++;
- // Add a phantom pipe reflecting the main pipe's timing
- dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);
-
- pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
- p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
- (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
- (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);
-
- subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;
-
- p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
- p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
- p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];
-
- p->new_display_config->output.OutputDisabled[new_timing_index] = true;
-
- p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;
-
- dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
- dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);
-
- p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportStationary[new_surface_index] = false;
-
- p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
- p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;
-
- p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;
-
- p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;
-
- p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;
-
- optimization_done = true;
- }
- }
- }
// Optimize Clocks
if (!optimization_done) {
@@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
return optimization_done;
}
-static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
+static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;
@@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d
dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
}
- dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);
+ dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info,
+ validate_mode);
if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
@@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const
}
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
- struct dc_state *display_state)
+ struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
unsigned int result = 0, i;
@@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);
@@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
dml2->v20.dml_core_ctx.policy = s->new_policy;
optimized_result = pack_and_call_dml_mode_support_ex(dml2,
&s->new_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (optimized_result)
optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);
@@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
if (!optimized_result) {
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
}
}
@@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
return result;
}
-static int find_drr_eligible_stream(struct dc_state *display_state)
-{
- int i;
-
- for (i = 0; i < display_state->stream_count; i++) {
- if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
- && display_state->streams[i]->ignore_msa_timing_param) {
- // Use ignore_msa_timing_param flag to identify as DRR
- return i;
- }
- }
-
- return -1;
-}
-
-static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
-{
- struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- bool pstate_optimization_done = false;
- bool pstate_optimization_success = false;
- bool result = false;
- int drr_display_index = 0, non_svp_streams = 0;
- bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
-
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
-
- result = dml_mode_support_wrapper(dml2, display_state);
-
- if (!result) {
- pstate_optimization_done = true;
- } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- }
-
- if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
-
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- non_svp_streams = display_state->stream_count;
-
- while (!pstate_optimization_done) {
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
-
- // Always try adding SVP first
- if (result)
- result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
- else
- pstate_optimization_done = true;
-
-
- if (result) {
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- pstate_optimization_done = true;
- }
-
- if (result) {
- non_svp_streams--;
-
- if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- } else {
- drr_display_index = find_drr_eligible_stream(display_state);
-
- // If there is only 1 remaining non SubVP pipe that is DRR, check static
- // schedulability for SubVP + DRR.
- if (non_svp_streams == 1 && drr_display_index >= 0) {
- if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- }
-
- if (pstate_optimization_success) {
- pstate_optimization_done = true;
- } else {
- pstate_optimization_done = false;
- }
- }
- }
- }
- }
-
- if (!pstate_optimization_success) {
- dml2_svp_remove_all_phantom_pipes(dml2, display_state);
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- return result;
-}
-
-static bool call_dml_mode_support_and_programming(struct dc_state *context)
+static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode)
{
unsigned int result = 0;
unsigned int min_state = 0;
@@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
if (!context->streams[0]->sink->link->dc->caps.is_apu) {
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context,
+ validate_mode);
ASSERT(min_state_for_g6_temp_read >= 0);
}
- if (!dml2->config.use_native_pstate_optimization) {
- result = optimize_pstate_with_svp_and_drr(dml2, context);
- } else {
- result = dml_mode_support_wrapper(dml2, context);
- }
+ result = dml_mode_support_wrapper(dml2, context, validate_mode);
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
@@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
return result;
}
-static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
+static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
@@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);
- result = call_dml_mode_support_and_programming(context);
+ result = call_dml_mode_support_and_programming(context, validate_mode);
/* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation
* is required or not, the resource context needs to correctly reflect the number of active pipes. We would
* only know the correct number if active pipes after dml2_map_dc_pipes is called.
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
if (need_recalculation) {
/* Engage the DML again if recalculation is required. */
- call_dml_mode_support_and_programming(context);
+ call_dml_mode_support_and_programming(context, validate_mode);
if (!dml2->config.skip_hw_state_mapping) {
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
}
@@ -684,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
return result;
}
-static bool dml2_validate_only(struct dc_state *context)
+static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2;
unsigned int result = 0;
@@ -708,7 +529,8 @@ static bool dml2_validate_only(struct dc_state *context)
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
- &dml2->v20.scratch.mode_support_info);
+ &dml2->v20.scratch.mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
@@ -723,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -733,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
/* DML2.1 validation path */
if (dml2->architecture == dml2_architecture_21) {
- out = dml21_validate(in_dc, context, dml2, fast_validate);
+ out = dml21_validate(in_dc, context, dml2, validate_mode);
return out;
}
DC_FP_START();
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
- out = dml2_validate_only(context);
+ /* Use dml2_validate_only for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ out = dml2_validate_only(context, validate_mode);
else
- out = dml2_validate_and_build_resource(in_dc, context);
+ out = dml2_validate_and_build_resource(in_dc, context, validate_mode);
DC_FP_END();
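For callers migrating off the old boolean, the mapping onto the new enum is mechanical. A minimal sketch (only the enum values below come from this patch; the surrounding caller code is assumed):

	/* fast_validate == true previously meant "mode support only"; full
	 * programming must now be requested explicitly. The third mode,
	 * DC_VALIDATE_MODE_AND_STATE_INDEX, also takes the validate-only path. */
	enum dc_validate_mode mode = fast_validate ? DC_VALIDATE_MODE_ONLY
						   : DC_VALIDATE_MODE_AND_PROGRAMMING;
	out = dml2_validate(in_dc, context, dml2, mode);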
@@ -757,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
@@ -803,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO: Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21)
- && (in_dc->ctx->dce_version == DCN_VERSION_4_01
- ))
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
// Allocate Mode Lib Ctx
@@ -874,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 5100f269368e..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -240,7 +240,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
bool force_tdlut_enable;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
/*
@@ -272,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc,
* dml2_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx.
*
* DML1.0 compatible interface for validation.
*
@@ -295,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc,
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml2,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info.
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 97bf26fa3573..36187f890d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_regamma_pwl = NULL,
.dpp_set_pre_degam = dpp3_set_pre_degam,
.dpp_program_input_lut = NULL,
- .dpp_full_bypass = dpp401_full_bypass,
+ .dpp_full_bypass = NULL,
.dpp_setup = dpp401_dpp_setup,
.dpp_program_degamma_pwl = NULL,
.dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index ecaa976e1f52..5a6a861402b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -641,6 +641,7 @@
uint32_t ISHARP_DELTA_DATA; \
uint32_t ISHARP_DELTA_INDEX; \
uint32_t ISHARP_NLDELTA_SOFT_CLIP
+
struct dcn401_dpp_registers {
DPP_REG_VARIABLE_LIST_DCN401;
};
@@ -683,8 +684,6 @@ void dpp401_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data);
-void dpp401_full_bypass(struct dpp *dpp_base);
-
void dpp401_dpp_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 712aff7e17f7..7aab77b58869 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -88,30 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-void dpp401_full_bypass(struct dpp *dpp_base)
-{
- struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
-
- /* Input pixel format: ARGB8888 */
- REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
- CNVC_SURFACE_PIXEL_FORMAT, 0x8);
-
- /* Zero expansion */
- REG_SET_3(FORMAT_CONTROL, 0,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 0,
- FORMAT_EXPANSION_MODE, 0);
-
- /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
- if (dpp->tf_mask->CM_BYPASS_EN)
- REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
- else
- REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-
- /* Setting degamma bypass for now */
- REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
-}
-
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 11535922b5ff..a454d16e6586 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#include "clk_mgr.h"
+#include "resource.h"
+
#define DC_LOGGER \
dsc->ctx->logger
@@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
}
/* Forward Declarations */
+static unsigned int get_min_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing);
+
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
@@ -183,6 +191,7 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slice_count,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
return true;
}
-
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
* timing's pixel clock and uncompressed bandwidth.
* If DSC is not possible, leave '*range' untouched.
@@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
+ unsigned int min_slice_count;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config = {0};
@@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+ min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, link_encoding, &config);
+ &options, link_encoding, min_slice_count, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -525,20 +536,152 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
}
+
+static void build_dsc_enc_combined_slice_caps(
+ const struct dsc_enc_caps *single_dsc_enc_caps,
+ struct dsc_enc_caps *dsc_enc_caps,
+ unsigned int max_odm_combine_factor)
+{
+ /* 1-16 slice configurations, single DSC */
+ dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw;
+
+ /* 2x DSC's */
+ if (max_odm_combine_factor >= 2) {
+ /* 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+
+ /* 8 + 8 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
+ }
+
+ /* 3x DSC's */
+ if (max_odm_combine_factor >= 3) {
+ /* 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+
+ /* 4x DSC's */
+ if (max_odm_combine_factor >= 4) {
+ /* 1 + 1 + 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 + 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 3 + 3 + 3 + 3 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3;
+
+ /* 4 + 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+}
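To make the cascade above concrete: for an encoder that supports 1-4 slices (what dsc401_get_single_enc_caps() reports later in this patch) and up to 4x ODM combine, the merged slice caps work out as follows:

	/*
	 * base (1x):  {1, 2, 3, 4}
	 * 2x ODM:     4 + 4 adds 8 (1+1 and 2+2 are already covered)
	 * 3x ODM:     4 + 4 + 4 adds 12
	 * 4x ODM:     4 x 4 adds 16 (4x1, 4x2, 4x3 already covered)
	 * merged:     {1, 2, 3, 4, 8, 12, 16}
	 */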
+
+static void build_dsc_enc_caps(
+ const struct display_stream_compressor *dsc,
+ struct dsc_enc_caps *dsc_enc_caps)
+{
+ unsigned int max_dscclk_khz;
+ unsigned int num_dsc;
+ unsigned int max_odm_combine_factor;
+ struct dsc_enc_caps single_dsc_enc_caps;
+
+ struct dc *dc;
+
+ memset(&single_dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
+ return;
+
+ dc = dsc->ctx->dc;
+
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool)
+ return;
+
+ /* get max DSCCLK from clk_mgr */
+ max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK);
+
+ dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz);
+
+ /* global capabilities */
+ dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version;
+ dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth;
+ dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported;
+ dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width;
+ dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div;
+ dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw;
+ dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw;
+
+ /* expand per DSC capabilities to global */
+ max_odm_combine_factor = dc->caps.max_odm_combine_factor;
+ num_dsc = dc->res_pool->res_cap->num_dsc;
+ max_odm_combine_factor = min(max_odm_combine_factor, num_dsc);
+ dsc_enc_caps->max_total_throughput_mps =
+ single_dsc_enc_caps.max_total_throughput_mps *
+ max_odm_combine_factor;
+
+ /* check slice counts possible with ODM combine */
+ build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor);
+}
+
+static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
+{
+ return (value + 9) / 10;
+}
+
+static unsigned int get_min_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing)
+{
+ unsigned int max_dispclk_khz;
+
+ /* default to total DSC throughput; use the real max DISPCLK when the clk_mgr reports it */
+ max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000;
+ if (dsc && dsc->ctx->dc) {
+ if (dsc->ctx->dc->clk_mgr &&
+ dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) {
+ /* dispclk is available */
+ max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK);
+ }
+ }
+
+ /* consider minimum ODM slices required due to
+ * 1) display pipe throughput (dispclk)
+ * 2) max image width per slice
+ */
+ return dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)),
+ max_dispclk_khz), // throughput
+ dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right),
+ dsc_enc_caps->max_slice_width))); // slice width
+}
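A plain-integer restatement of the fixed-point expression above, using assumed 8K numbers (illustrative values, not from this patch):

	/* 7680-wide timing at 2376 MHz (pix_clk_100hz = 23760000),
	 * max_dispclk_khz = 1950000, max_slice_width = 5184 -- all assumed. */
	unsigned int pix_rate_khz   = dsc_div_by_10_round_up(23760000);    /* 2376000 */
	unsigned int for_throughput = DIV_ROUND_UP(pix_rate_khz, 1950000); /* 2 */
	unsigned int for_width      = DIV_ROUND_UP(7680, 5184);            /* 2 */
	unsigned int min_slices     = max(for_throughput, for_width);      /* 2 */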
+
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
- // This is a static HW query, so we can use any DSC
-
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc) {
+
+ if (!dsc)
+ return;
+
+ /* check whether the reported caps are global or only for a single DCN DSC enc */
+ if (dsc->funcs->dsc_get_enc_caps) {
if (!dsc->ctx->dc->debug.disable_dsc)
dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
- if (dsc->ctx->dc->debug.native422_support)
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ } else {
+ build_dsc_enc_caps(dsc, dsc_enc_caps);
}
+
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -621,11 +764,6 @@ static bool intersect_dsc_caps(
return true;
}
-static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
-{
- return (value + 9) / 10;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -910,11 +1048,11 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slices_h,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
int max_slices_h = 0;
- int min_slices_h = 0;
int num_slices_h = 0;
int pic_width;
int slice_width;
@@ -1018,12 +1156,9 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- min_slices_h = pic_width / dsc_common_caps.max_slice_width;
- if (pic_width % dsc_common_caps.max_slice_width)
- min_slices_h++;
-
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
+ /* increase minimum slice count to meet sink throughput limitations */
while (min_slices_h <= max_slices_h) {
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
@@ -1032,14 +1167,12 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
- if (pic_width % min_slices_h != 0)
- min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
-
- if (min_slices_h == 0 && max_slices_h == 0)
- is_dsc_possible = false;
+ /* increase minimum slice count to meet divisibility requirements */
+ while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) {
+ min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
+ }
+ is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0;
if (!is_dsc_possible)
goto done;
@@ -1162,12 +1295,19 @@ bool dc_dsc_compute_config(
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
-
+ unsigned int min_slice_count;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, link_encoding, dsc_cfg);
+ timing,
+ options,
+ link_encoding,
+ min_slice_count,
+ dsc_cfg);
return is_dsc_possible;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4222679fd4c9..7bd92ae8b13e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -9,19 +9,14 @@
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
-#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000
-#define MAX_DSC_UNIT_COMBINE 4
-
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn401_dsc_funcs = {
- .dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
.dsc_set_config = dsc401_set_config,
@@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disable = dsc401_disable,
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -66,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc,
dsc->max_image_width = 5184;
}
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
{
- int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ;
-
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
- /* 1 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0;
- /* 2 slice is only supported with 1 or 2 DSC units */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0;
- /* 3 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
dsc_enc_caps->lb_bit_depth = 13;
dsc_enc_caps->is_block_pred_supported = true;
@@ -95,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
- dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE;
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
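The throughput line above replaces the removed MAX_THROUGHPUT_PER_DSC_100HZ constant with a DSCCLK-derived value; the factor of 3 is presumably the number of pixels processed per DSCCLK cycle. Worked through with an assumed clock:

	/* max_dscclk_khz = 1400000 (1.4 GHz, assumed):
	 * 1400000 * 3 / 1000 = 4200 MP/s per encoder. build_dsc_enc_caps()
	 * then scales this by the ODM combine factor for the global cap. */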
@@ -191,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index e3ca70058e64..7acd57eb4f42 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c
void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
void dsc401_disable(struct display_stream_compressor *dsc);
void dsc401_disconnect(struct display_stream_compressor *dsc);
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 1ebce5426a58..b0bd1f9425b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -108,6 +108,7 @@ struct dsc_funcs {
void (*dsc_disable)(struct display_stream_compressor *dsc);
void (*dsc_disconnect)(struct display_stream_compressor *dsc);
void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc);
+ void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index c7765e6f09e6..f8f991785d4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -666,10 +666,29 @@ struct dcn_mi_mask {
DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
+struct dcn_fl_regs_st {
+ uint32_t lut_enable;
+ uint32_t lut_done;
+ uint32_t lut_addr_mode;
+ uint32_t lut_width;
+ uint32_t lut_tmz;
+ uint32_t lut_crossbar_sel_r;
+ uint32_t lut_crossbar_sel_g;
+ uint32_t lut_crossbar_sel_b;
+ uint32_t lut_addr_hi;
+ uint32_t lut_addr_lo;
+ uint32_t refcyc_3dlut_group;
+ uint32_t lut_fl_bias;
+ uint32_t lut_fl_scale;
+ uint32_t lut_fl_mode;
+ uint32_t lut_fl_format;
+};
+
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
struct _vcs_dpi_display_ttu_regs_st ttu_attr;
struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct dcn_fl_regs_st fl_regs;
uint32_t pixel_format;
uint32_t inuse_addr_hi;
uint32_t inuse_addr_lo;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index baed31611477..705b98b1b6cc 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
+ REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
}
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 6e1d4c90ddd4..608e6153fa68 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 38e17b1796e1..4ea13d0bf815 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1186,8 +1186,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- if (dccg && dccg->funcs->set_dtbclk_dto)
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) {
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -1379,7 +1381,7 @@ static void populate_audio_dp_link_info(
}
}
-static void build_audio_output(
+void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output)
@@ -1684,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ /* Temporary workaround to perform DSC programming ahead of stream enablement
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, true);
+ }
+
if (!stream->dpms_off)
dc->link_srv->set_dpms_on(context, pipe_ctx);
@@ -1925,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
edp_stream->sink, &edp_stream->timing);
+
+ // On mux platforms the default value is false.
+ // Disable fast boot during mux switching;
+ // the flag is cleared once the switch completes.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds)
+ can_apply_edp_fast_boot = false;
+
edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
if (can_apply_edp_fast_boot) {
DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n");
@@ -1968,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (edp_with_sink_num)
edp_link_with_sink = edp_links_with_sink[0];
+ // During a mux switch, powering down the HW blocks and then enabling
+ // the link via a DPCD SET_POWER write causes a brief flash
+ keep_edp_vdd_on |= dc->is_switch_in_progress_dest;
+
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 06789ac3a224..7cd8c1576988 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -110,5 +110,9 @@ void dce110_enable_dp_link_output(
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
+void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index f9ee55998b6b..39910f73ecd0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -327,6 +327,35 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
}
+ DTN_INFO("\n=======HUBP FL======\n");
+ DTN_INFO(
+ "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+
+ if (!s->blank_en) {
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ pool->hubps[i]->inst,
+ fl_regs->lut_enable,
+ fl_regs->lut_done,
+ fl_regs->lut_addr_mode,
+ fl_regs->lut_width,
+ fl_regs->lut_tmz,
+ fl_regs->lut_crossbar_sel_r,
+ fl_regs->lut_crossbar_sel_g,
+ fl_regs->lut_crossbar_sel_b,
+ fl_regs->lut_addr_hi,
+ fl_regs->lut_addr_lo,
+ fl_regs->refcyc_3dlut_group,
+ fl_regs->lut_fl_bias,
+ fl_regs->lut_fl_scale,
+ fl_regs->lut_fl_mode,
+ fl_regs->lut_fl_format);
+ DTN_INFO("\n");
+ }
+ }
+
DTN_INFO("\n=========RQ========\n");
DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
@@ -511,6 +540,36 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.num_3dluts,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
+ DTN_INFO("===== MPC RMCM 3DLUT =====\n");
+ DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ }
+ DTN_INFO("\n");
+ DTN_INFO("===== MPC RMCM Shaper =====\n");
+ DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
+ s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
+ s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
+ s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
+ s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
+ }
}
void dcn10_log_hw_state(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c277df12c817..3207addbd4eb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -283,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock(
}
/* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) {
pipe_ctx->stream_res.tg->funcs->set_gsl(
pipe_ctx->stream_res.tg,
&gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL)
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
} else
BREAK_TO_DEBUGGER();
}
@@ -956,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
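The fsleep() above replaces the OPP blank-complete poll with a computed one-frame delay: h_total * 10000 / pix_clk_100hz is the line time in microseconds (pix_clk_100hz is in units of 100 Hz), multiplied by v_total. Checking with an assumed 1080p60 CEA timing:

	/* h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000 (148.5 MHz):
	 * 2200 * 10000 / 1485000 = 14 us per line (integer truncation)
	 * 1125 * 14 = 15750 us, just under the 16667 us frame period. */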
@@ -1971,14 +1970,6 @@ static void dcn20_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2492,7 +2483,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2816,6 +2807,8 @@ void dcn20_reset_back_end_for_pipe(
{
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct dtbclk_dto_params dto_params = {0};
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
@@ -2876,6 +2869,13 @@ void dcn20_reset_back_end_for_pipe(
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
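+ /* Counterpart of the dce110_disable_stream() change above: on DCN3.5+ the DTBCLK DTO for 128b/132b streams is torn down here instead */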
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg
+ && dc->ctx->dce_version >= DCN_VERSION_3_5) {
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 5ba3999991b0..8ba934b83957 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
+ /* Temporary workaround to perform DSC programming ahead of pipe reset
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
+ }
+
/* free acquired resources */
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index a0b05b9ef660..416b1dca3dac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
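+ /* set_dto_dscclk() now also takes num_slices_h, presumably so the DTO divider can track the slice count (dccg-side change not shown here) */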
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (!odm_dsc)
+ continue;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index c4177a9a662f..cc9f40d97af2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2,6 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "os_types.h"
#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
@@ -49,7 +51,7 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn401_initialize_min_clocks(struct dc *dc)
+void dcn401_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
@@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
- dc->caps.dcmode_power_limits_present =
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
+ dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
+ dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
}
// Initialize the dccg
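The open-coded expression removed above documents what the new is_dc_mode_present() callback is expected to report. A generic sketch under that assumption (real implementations are per-ASIC and not part of this hunk):

	static bool example_is_dc_mode_present(struct clk_mgr *clk_mgr)
	{
		struct clk_bw_params *p = clk_mgr->bw_params;

		/* True if any clock carries a distinct DC-mode (battery) limit;
		 * the removed expression also checked dtbclk, fclk and socclk. */
		return (p->clk_table.num_entries_per_clk.num_dcfclk_levels && p->dc_mode_limit.dcfclk_mhz) ||
		       (p->clk_table.num_entries_per_clk.num_dispclk_levels && p->dc_mode_limit.dispclk_mhz) ||
		       (p->clk_table.num_entries_per_clk.num_memclk_levels && p->dc_mode_limit.memclk_mhz);
	}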
@@ -396,249 +393,6 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct
}
}
-static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
- MPCC_MOVABLE_CM_LOCATION_AFTER :
- MPCC_MOVABLE_CM_LOCATION_BEFORE;
-}
-
-static void dc_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode)
-{
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
-static void dc_get_lut_format(
- enum dc_cm2_gpu_mem_format dc_format,
- enum hubp_3dlut_fl_format *format)
-{
- switch (dc_format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- *format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
-}
-
-static void dc_get_lut_xbar(
- enum dc_cm2_gpu_mem_pixel_component_order order,
- enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
- enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
-{
- switch (order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- break;
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
-}
-
-static void dc_get_lut_width(
- enum dc_cm2_gpu_mem_size size,
- enum hubp_3dlut_fl_width *width)
-{
- switch (size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- *width = hubp_3dlut_fl_width_33;
- break;
- case DC_CM2_GPU_MEM_SIZE_171717:
- *width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- *width = hubp_3dlut_fl_width_transformed;
- break;
- }
-}
-static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
-{
- if (mpc->funcs->rmcm.update_3dlut_fast_load_select &&
- mpc->funcs->rmcm.program_lut_read_write_control &&
- hubp->funcs->hubp_program_3dlut_fl_addr &&
- mpc->funcs->rmcm.program_bit_depth &&
- hubp->funcs->hubp_program_3dlut_fl_mode &&
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode &&
- hubp->funcs->hubp_program_3dlut_fl_format &&
- hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->rmcm.program_bias_scale &&
- hubp->funcs->hubp_program_3dlut_fl_crossbar &&
- hubp->funcs->hubp_program_3dlut_fl_width &&
- mpc->funcs->rmcm.update_3dlut_fast_load_select &&
- mpc->funcs->rmcm.populate_lut &&
- mpc->funcs->rmcm.program_lut_mode &&
- hubp->funcs->hubp_enable_3dlut_fl &&
- mpc->funcs->rmcm.enable_3dlut_fl)
- return true;
-
- return false;
-}
-
-bool dcn401_program_rmcm_luts(
- struct hubp *hubp,
- struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
- struct mpc *mpc,
- bool lut_bank_a,
- int mpcc_id)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- union mcm_lut_params m_lut_params;
- enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum hubp_3dlut_fl_width width = 0;
- struct dc *dc = hubp->ctx->dc;
-
- bool bypass_rmcm_3dlut = false;
- bool bypass_rmcm_shaper = false;
-
- dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- // Don't know what to do in this case.
- //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width);
- if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
- !mpc->funcs->rmcm.is_config_supported(width))
- return false;
-
- //0. disable fl on mpc
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF);
-
- //1. power down the block
- mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
-
- //2. program RMCM
- //2a. 3dlut reg programming
- mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a,
- (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id);
-
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp,
- mcm_luts->lut3d_data.gpu_mem_params.addr);
-
- mpc->funcs->rmcm.program_bit_depth(mpc,
- mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
-
- // setting native or transformed mode,
- dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
-
- //these program the mcm 3dlut
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
-
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- //seems to be only for the MCM
- dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
-
- mpc->funcs->rmcm.program_bias_scale(mpc,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale);
-
- dc_get_lut_xbar(
- mcm_luts->lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id);
-
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
-
- //2b. shaper reg programming
- memset(&m_lut_params, 0, sizeof(m_lut_params));
-
- if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
- m_lut_params.pwl = &mcm_luts->shaper->pwl;
- } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
- mcm_luts->shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = &dpp_base->regamma_params;
- }
- if (m_lut_params.pwl) {
- mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id);
- } else {
- //RMCM 3dlut won't work without its shaper
- return false;
- }
-
- //3. Select the hubp connected to this RMCM
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id);
-
- //4. power on the block
- if (m_lut_params.pwl)
- mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- break;
- default:
- return false;
- }
-
- return true;
-}
-
void dcn401_populate_mcm_luts(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
@@ -664,25 +418,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
- //MCM - setting its location (Before/After) blender
- //set to post blend (true)
- dcn401_set_mcm_location_post_blend(
- dc,
- pipe_ctx,
- mcm_luts.lut3d_data.mpc_mcm_post_blend);
-
- //RMCM - 3dLUT+Shaper
- if (mcm_luts.lut3d_data.rmcm_3dlut_enable) {
- dcn401_program_rmcm_luts(
- hubp,
- pipe_ctx,
- lut3d_src,
- &mcm_luts,
- mpc,
- lut_bank_a,
- mpcc_id);
- }
-
/* 1D LUT */
if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
@@ -740,15 +475,15 @@ void dcn401_populate_mcm_luts(struct dc *dc,
break;
case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- width = hubp_3dlut_fl_width_33;
- break;
case DC_CM2_GPU_MEM_SIZE_171717:
width = hubp_3dlut_fl_width_17;
break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
width = hubp_3dlut_fl_width_transformed;
break;
+ default:
+ //TODO: handle default case
+ break;
}
//check for support
@@ -817,11 +552,14 @@ void dcn401_populate_mcm_luts(struct dc *dc,
//navi 4x has a bug where the red and blue channels are swapped; work around it here
//TODO: add a per-ASIC get_xbar method OR do the workaround in program_crossbar for 4x
- dc_get_lut_xbar(
- mcm_luts.lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
+ switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
+ default:
+ crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
@@ -2269,14 +2007,6 @@ void dcn401_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2651,7 +2381,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2902,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ }
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
@@ -2916,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
- if (org_ip_request_cntl == 0)
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index ce65b4f6c672..2621b7725267 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,12 +109,5 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
-bool dcn401_program_rmcm_luts(
- struct hubp *hubp,
- struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
- struct mpc *mpc,
- bool lut_bank_a,
- int mpcc_id);
+void dcn401_initialize_min_clocks(struct dc *dc);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 3a0795045bc6..9df8030e37f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -502,6 +502,9 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color);
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index f3696143590c..82085d9c3f40 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -59,6 +59,7 @@ enum dc_status {
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
DC_FAIL_HW_CURSOR_SUPPORT = 29,
+ DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 0cf349cafb3e..f0d7185153b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -67,6 +67,8 @@ struct resource_context;
struct clk_bw_params;
struct dc_mcache_params;
+#define MAX_RMCM_INST 2
+
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
void (*destroy)(struct resource_pool **pool);
@@ -82,7 +84,7 @@ struct resource_funcs {
enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void (*calculate_wm_and_dlg)(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -107,7 +109,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* Algorithm for assigning available link encoders to links.
@@ -223,6 +225,11 @@ struct resource_funcs {
const struct dc_stream_state *stream);
bool (*program_mcache_pipe_config)(struct dc_state *context,
const struct dc_mcache_params *mcache_params);
+ enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
};
struct audio_support{
@@ -281,6 +288,7 @@ struct resource_pool {
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
+ struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST];
struct {
unsigned int xtalin_clock_inKhz;
@@ -556,7 +564,10 @@ struct dcn_bw_output {
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
- union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union {
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES];
+ };
struct dml2_display_arb_regs arb_regs;
};
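
Overlaying fams2_stream_sub_params and fams2_stream_sub_params_v2 in an anonymous union works because at most one layout is live for a given DCN/firmware combination, so dcn_bw_output pays for the larger of the two arrays rather than both. A small standalone illustration with made-up types (not the DC definitions):

#include <stdio.h>

#define MAX_PLANES 8

struct params_v1 { int a; };
struct params_v2 { int a; int b; };

struct bw_output {
	union {                     /* only one layout is live at a time */
		struct params_v1 sub_params[MAX_PLANES];
		struct params_v2 sub_params_v2[MAX_PLANES];
	};
};

int main(void)
{
	struct bw_output out = { 0 };

	out.sub_params_v2[0].b = 42;  /* v2 writers use the second member */
	printf("%zu bytes total\n", sizeof(out)); /* max of the two, not the sum */
	return 0;
}
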
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index d19a595c2be4..134091d5842d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn_get_soc_clks(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index c14d64687a3d..2c9a4a12bd8a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -100,6 +100,17 @@ struct dcn301_clk_internal {
#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
+enum clk_type {
+ CLK_TYPE_DCFCLK,
+ CLK_TYPE_FCLK,
+ CLK_TYPE_MCLK,
+ CLK_TYPE_SOCCLK,
+ CLK_TYPE_DTBCLK,
+ CLK_TYPE_DISPCLK,
+ CLK_TYPE_DPPCLK,
+ CLK_TYPE_DSCCLK,
+ CLK_TYPE_COUNT
+};
struct clk_limit_table_entry {
unsigned int voltage; /* millivolts with 2 fractional bits */
@@ -324,6 +335,11 @@ struct clk_mgr_funcs {
int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base);
+ bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
+
+ uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set);
+
+ unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
};
struct clk_mgr {
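
get_max_clock_khz, like the other new hooks here, is optional, so a caller would be expected to NULL-check it before use. A hypothetical caller sketch with trimmed stand-in types (only the shape matches the header above):

#include <stddef.h>
#include <stdio.h>

enum clk_type { CLK_TYPE_DISPCLK };   /* subset of the new enum */

struct clk_mgr;
struct clk_mgr_funcs {
	unsigned int (*get_max_clock_khz)(struct clk_mgr *base, enum clk_type t);
};
struct clk_mgr { const struct clk_mgr_funcs *funcs; };

/* The hook is optional: older clock managers leave it NULL. */
static unsigned int query_max_dispclk_khz(struct clk_mgr *clk_mgr)
{
	if (clk_mgr->funcs && clk_mgr->funcs->get_max_clock_khz)
		return clk_mgr->funcs->get_max_clock_khz(clk_mgr, CLK_TYPE_DISPCLK);
	return 0;
}

int main(void)
{
	struct clk_mgr_funcs no_hooks = { NULL };
	struct clk_mgr mgr = { &no_hooks };

	printf("max dispclk: %u kHz\n", query_max_dispclk_khz(&mgr));
	return 0;
}
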
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index e94e9ba60f55..61c4d2a7db1c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -211,7 +211,7 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index b610beb075d5..cee29e89ec5c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -282,7 +282,7 @@ struct hubp_funcs {
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
+ void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 6e303b81bfb0..7641439f6ca0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -190,6 +190,42 @@ struct mpc_grph_gamut_adjustment {
enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id;
};
+struct mpc_rmcm_regs {
+ uint32_t rmcm_3dlut_mem_pwr_state;
+ uint32_t rmcm_3dlut_mem_pwr_force;
+ uint32_t rmcm_3dlut_mem_pwr_dis;
+ uint32_t rmcm_3dlut_mem_pwr_mode;
+ uint32_t rmcm_3dlut_size;
+ uint32_t rmcm_3dlut_mode;
+ uint32_t rmcm_3dlut_mode_cur;
+ uint32_t rmcm_3dlut_read_sel;
+ uint32_t rmcm_3dlut_30bit_en;
+ uint32_t rmcm_3dlut_wr_en_mask;
+ uint32_t rmcm_3dlut_ram_sel;
+ uint32_t rmcm_3dlut_out_norm_factor;
+ uint32_t rmcm_3dlut_fl_sel;
+ uint32_t rmcm_3dlut_out_offset_r;
+ uint32_t rmcm_3dlut_out_scale_r;
+ uint32_t rmcm_3dlut_fl_done;
+ uint32_t rmcm_3dlut_fl_soft_underflow;
+ uint32_t rmcm_3dlut_fl_hard_underflow;
+ uint32_t rmcm_cntl;
+ uint32_t rmcm_shaper_mem_pwr_state;
+ uint32_t rmcm_shaper_mem_pwr_force;
+ uint32_t rmcm_shaper_mem_pwr_dis;
+ uint32_t rmcm_shaper_mem_pwr_mode;
+ uint32_t rmcm_shaper_lut_mode;
+ uint32_t rmcm_shaper_mode_cur;
+ uint32_t rmcm_shaper_lut_write_en_mask;
+ uint32_t rmcm_shaper_lut_write_sel;
+ uint32_t rmcm_shaper_offset_b;
+ uint32_t rmcm_shaper_scale_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_seg_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_base_b;
+};
+
struct mpcc_sm_cfg {
bool enable;
/* 0-single plane, 2-row subsampling, 4-column subsampling, 6-checkerboard subsampling */
@@ -301,6 +337,7 @@ struct mpcc_state {
uint32_t rgam_mode;
uint32_t rgam_lut;
struct mpc_grph_gamut_adjustment gamut_remap;
+ struct mpc_rmcm_regs rmcm_regs;
};
/**
@@ -1038,6 +1075,11 @@ struct mpc_funcs {
*/
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+ /**
+ * @mcm:
+ *
+ * MPC MCM new HW sequential programming functions
+ */
struct {
void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
@@ -1050,6 +1092,11 @@ struct mpc_funcs {
bool lut_bank_a, int mpcc_id);
} mcm;
+ /**
+ * @rmcm:
+ *
+ * MPC RMCM new HW sequential programming functions
+ */
struct {
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index 00ea3864dd4d..44f86cc2d1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -46,6 +46,8 @@ struct pg_cntl_funcs {
void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on);
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe7f3137f228..27f950ae45ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -117,6 +117,7 @@ struct stream_encoder {
uint32_t stream_enc_inst;
struct vpg *vpg;
struct afmt *afmt;
+ struct apg *apg;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 7d16351bba99..f2503402c10e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -144,9 +144,9 @@ struct link_service {
uint32_t (*dp_link_bandwidth_kbps)(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
- bool (*validate_dpia_bandwidth)(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+ enum dc_status (*validate_dp_tunnel_bandwidth)(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t (*dp_required_hblank_size_bytes)(
const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 96febabf464a..2956c2b3ad1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "clk_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -67,10 +68,17 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ struct dc_stream_update stream_update = { 0 };
+ bool dpms_off = false;
+ bool needs_divider_update = false;
bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
bool is_hpo_acquired;
uint8_t count;
int i;
+ struct audio_output audio_output[MAX_PIPES];
+
+ needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
+ link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
udelay(100);
@@ -83,16 +91,59 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
link->dc,
state,
pipes[i]);
+
+ // Disable OTG and re-enable after updating clocks
+ pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg);
}
- if (link->dc->hwss.setup_hpo_hw_control) {
- is_hpo_acquired = resource_is_hpo_acquired(state);
- if (was_hpo_acquired != is_hpo_acquired)
- link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
+ link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
+ link_setting, count,
+ *pipes, &audio_output[0]);
+ for (i = 0; i < count; i++) {
+ pipes[i]->clock_source->funcs->program_pix_clk(
+ pipes[i]->clock_source,
+ &pipes[i]->stream_res.pix_clk_params,
+ link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings),
+ &pipes[i]->pll_settings);
+
+ if (pipes[i]->stream_res.audio != NULL) {
+ const struct link_hwss *link_hwss = get_link_hwss(
+ link, &pipes[i]->link_res);
+
+ link_hwss->setup_audio_output(pipes[i], &audio_output[i],
+ pipes[i]->stream_res.audio->inst);
+
+ pipes[i]->stream_res.audio->funcs->az_configure(
+ pipes[i]->stream_res.audio,
+ pipes[i]->stream->signal,
+ &audio_output[i].crtc_info,
+ &pipes[i]->stream->audio_info,
+ &audio_output[i].dp_link_info);
+
+ if (link->dc->config.disable_hbr_audio_dp2 &&
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
+ link->dc->link_srv->dp_is_128b_132b_signal(pipes[i]))
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio);
+ }
+ }
}
- for (i = count-1; i >= 0; i--)
- link_set_dpms_on(state, pipes[i]);
+ // Toggle on HPO I/O if necessary
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+
+ for (i = 0; i < count; i++)
+ pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
+
+ // Set DPMS on with stream update
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
+ stream_update.stream = state->streams[i];
+ stream_update.dpms_off = &dpms_off;
+ dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ }
}
static void dp_test_send_link_training(struct dc_link *link)
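
The reworked retrain path above is easiest to read as an ordered sequence. A standalone skeleton of that order, with every hardware step stubbed out (the step list is one reading of the hunk above, not driver code):

#include <stdbool.h>
#include <stdio.h>

static void stub(const char *step) { printf("%s\n", step); }

static void retrain_for_test(bool encoding_changed)
{
	stub("1. dpms off on every pipe driven by the link");
	stub("2. disable each pipe's OTG/CRTC");
	if (encoding_changed) {
		stub("3. rebuild DC state for the new encoder");
		stub("4. reprogram pixel clock + audio per pipe");
	}
	stub("5. toggle HPO I/O if acquisition changed");
	stub("6. re-enable each pipe's OTG/CRTC");
	stub("7. dpms on via dc_update_planes_and_stream()");
}

int main(void) { retrain_for_test(true); return 0; }
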
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 116ff37126e7..55c5148de800 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- uint8_t clk_src = 0x4C;
+ uint8_t clk_src = 0xC4;
uint8_t pattern = 0x4F; /* SQ128 */
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 9655e6fa53a4..827b630daf49 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
+ if (!detect_dp_sink_caps(link)) {
return false;
+ }
if (is_dp_branch_device(link))
/* DP SST branch */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 273a3be6d593..8c8682f743d6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
}
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) &&
+ (link->type != dc_connection_none))
dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -842,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -2296,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i
link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
}
- /* get dp overhead for dp tunneling */
- link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link);
req_bw += link->dpia_bw_alloc_config.dp_overhead;
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
@@ -2537,6 +2537,14 @@ void link_set_dpms_on(
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
update_psp_stream_config(pipe_ctx, false);
+
+ if (link->is_dds) {
+ uint32_t post_oui_delay = 30; // 30ms
+
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ }
+
return;
}
@@ -2629,6 +2637,15 @@ void link_set_dpms_on(
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
+ /* Corruption was observed on systems with a display mux when the stream
+ * gets enabled after the mux switch. Having a small delay between link
+ * training and stream unblank resolves the corruption issue.
+ * This is a workaround.
+ */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ link->is_display_mux_present)
+ msleep(20);
+
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 1a04f4b74585..de1143dbbd25 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
{
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
- link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth;
link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
@@ -539,10 +539,16 @@ static bool construct_phy(struct dc_link *link,
break;
case CONNECTOR_ID_EDP:
+ // If smartmux is supported, only create the link on the primary eDP.
+ // Dual eDP is not supported with smartmux.
+ if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0))
+ goto create_fail;
+
link->connector_signal = SIGNAL_TYPE_EDP;
if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
+ if (!link->dc->config.allow_edp_hotplug_detection
+ && !is_smartmux_suported(link))
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 29606fda029d..aecaf37eee35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing(
if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
return false;
break;
+ case PIXEL_ENCODING_UNDEFINED:
+ /* An undefined pixel encoding is currently not supported */
+ ASSERT(false);
+ break;
default:
/* Invalid Pixel Encoding*/
return false;
@@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
break;
+ case COLOR_DEPTH_UNDEFINED:
+ /* These color depths are currently not supported */
+ ASSERT(false);
+ break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
+static uint32_t dp_get_timing_bandwidth_kbps(
+ const struct dc_crtc_timing *timing,
+ const struct dc_link *link)
+{
+ return dc_bandwidth_in_kbps_from_timing(timing,
+ dc_link_get_highest_encoding_format(link));
+}
+
static bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -351,63 +367,81 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ int i;
+ const struct dc_tunnel_settings *dp_tunnel_settings = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) {
+ dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings;
+ break;
+ }
+ }
+
+ return dp_tunnel_settings;
+}
+
/*
- * This function calculates the bandwidth required for the stream timing
- * and aggregates the stream bandwidth for the respective dpia link
- *
- * @stream: pointer to the dc_stream_state struct instance
- * @num_streams: number of streams to be validated
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * return: true if validation is succeeded
+ * return: dc_status
*/
-bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- int bw_needed[MAX_DPIA_NUM] = {0};
- struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
- int num_dpias = 0;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
- /* new dpia sst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
+ struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 };
+ uint8_t link_count = 0;
+ enum dc_status result = DC_OK;
- dpia_link[num_dpias] = stream[i].link;
- bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
- num_dpias++;
- } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- uint8_t j = 0;
- /* check whether its a known dpia link */
- for (; j < num_dpias; ++j) {
- if (dpia_link[j] == stream[i].link)
- break;
- }
+ // Iterate through streams in the new context
+ for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) {
+ const struct dc_stream_state *stream = new_ctx->streams[i];
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *dp_tunnel_settings;
+ uint32_t timing_bw;
+
+ if (stream == NULL)
+ continue;
+
+ link = stream->link;
+
+ if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ && link->hpd_status))
+ continue;
- if (j == num_dpias) {
- /* new dpia mst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
- else {
- dpia_link[j] = stream[i].link;
- num_dpias++;
- }
+ dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
+
+ if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false))
+ continue;
+
+ timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link);
+
+ // Find an existing entry for this 'link' in 'dpia_link_sets'
+ for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) {
+ bool is_new_slot = false;
+
+ if (dpia_link_sets[j].link == NULL) {
+ is_new_slot = true;
+ link_count++;
+ dpia_link_sets[j].required_bw = 0;
+ dpia_link_sets[j].link = link;
}
- bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[j]));
+ if (is_new_slot || (dpia_link_sets[j].link == link)) {
+ dpia_link_sets[j].tunnel_settings = dp_tunnel_settings;
+ dpia_link_sets[j].required_bw += timing_bw;
+ break;
+ }
}
}
- /* Include dp overheads */
- for (uint8_t i = 0; i < num_dpias; ++i) {
- int dp_overhead = 0;
-
- dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
- bw_needed[i] += dp_overhead;
- }
+ if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false)
+ result = DC_FAIL_DP_TUNNEL_BW_VALIDATE;
- return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ return result;
}
struct dp_audio_layout_config {
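
The slot-matching loop above folds every stream that shares a tunneled link into one dc_validation_dpia_set entry. A standalone sketch of that accumulation with stand-in types, showing two streams on one DPIA summing into a single required_bw:

#include <stdint.h>
#include <stdio.h>

#define MAX_DPIA_NUM 4

struct dpia_set { const void *link; uint32_t required_bw; };

static void accumulate(struct dpia_set *sets, const void *link, uint32_t bw)
{
	for (int j = 0; j < MAX_DPIA_NUM; j++) {
		if (sets[j].link == NULL || sets[j].link == link) {
			sets[j].link = link;          /* claim or match a slot */
			sets[j].required_bw += bw;
			return;
		}
	}
}

int main(void)
{
	struct dpia_set sets[MAX_DPIA_NUM] = { 0 };
	int dpia0 = 0, dpia1 = 0;   /* addresses stand in for dc_link pointers */

	accumulate(sets, &dpia0, 8100000);  /* stream A, 8.1 Gbps */
	accumulate(sets, &dpia0, 5400000);  /* stream B, same link */
	accumulate(sets, &dpia1, 2700000);  /* stream C, other link */

	printf("dpia0 needs %u kbps\n", sets[0].required_bw); /* 13500000 */
	return 0;
}
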
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index bf398c49c3e8..9553c81053fe 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -30,9 +30,9 @@ enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing);
-bool link_validate_dpia_bandwidth(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+enum dc_status link_validate_dp_tunnel_bandwidth(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 0f965380a9b4..651926e547b9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1388,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link)
struct dpcd_amd_signature amd_signature = {0};
struct dpcd_amd_device_id amd_device_id = {0};
+ if (link->is_dds) {
+ uint8_t dpcd_dp_edp_backlight_mode = 0;
+
+ /*
+ * Write 0 to bits 0:1 of the dp_edp_backlight_mode_set register
+ * if the platform is DDS
+ */
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ dpcd_dp_edp_backlight_mode &= ~0x3;
+
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
+ }
+
amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
amd_device_id.device_id_byte2 =
@@ -1543,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
+ if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) {
+ link->dpcd_sink_ext_caps.raw = 0;
+ return false;
+ }
if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 22bfdced64ab..9b2f1a7da1d1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -75,12 +75,15 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
- dpcd_dp_tun_data, 1);
+ dpcd_dp_tun_data, 2);
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0];
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw =
+ dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY];
}
DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
@@ -155,8 +158,14 @@ void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
- && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support)
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F;
+ dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id;
+ dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw;
+ dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw;
+ dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity;
+ }
}
}
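
The two-byte read above indexes the buffer as (register - USB4_DRIVER_BW_CAPABILITY), which is why the first index is written as a self-cancelling subtraction: it documents which DPCD register byte 0 holds. The same idiom in isolation, with placeholder register addresses (not the real DPCD map):

#include <stdint.h>
#include <stdio.h>

#define BASE_REG   0xE0020u   /* hypothetical: first register in the block read */
#define SECOND_REG 0xE0021u   /* hypothetical: the adjacent register */

int main(void)
{
	uint8_t buf[2] = { 0xA5, 0x3C };   /* pretend AUX read of 2 bytes */

	uint8_t first  = buf[BASE_REG   - BASE_REG];   /* index 0, self-documenting */
	uint8_t second = buf[SECOND_REG - BASE_REG];   /* index 1 */

	printf("%#x %#x\n", first, second);
	return 0;
}
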
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 3af7564a84f1..819bf2d8ba53 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -35,6 +35,8 @@
#define Kbps_TO_Gbps (1000 * 1000)
+#define MST_TIME_SLOT_COUNT 64
+
// ------------------------------------------------------------------
// PRIVATE FUNCTIONS
// ------------------------------------------------------------------
@@ -160,78 +162,6 @@ static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
link->dpia_bw_alloc_config.nrd_max_lane_count);
}
-static uint8_t get_lowest_dpia_index(struct dc_link *link)
-{
- const struct dc *dc_struct = link->dc;
- uint8_t idx = 0xFF;
- int i;
-
- for (i = 0; i < MAX_LINKS; ++i) {
-
- if (!dc_struct->links[i] ||
- dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- if (idx > dc_struct->links[i]->link_index) {
- idx = dc_struct->links[i]->link_index;
- break;
- }
- }
-
- return idx;
-}
-
-/*
- * Get the maximum dp tunnel banwidth of host router
- *
- * @dc: pointer to the dc struct instance
- * @hr_index: host router index
- *
- * return: host router maximum dp tunnel bandwidth
- */
-static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
-{
- uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
- uint8_t hr_index_temp = 0;
- struct dc_link *link_dpia_primary, *link_dpia_secondary;
- int total_bw = 0;
-
- for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
-
- if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
-
- hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
-
- if (hr_index_temp == hr_index) {
- link_dpia_primary = dc->links[i];
- link_dpia_secondary = dc->links[i + 1];
-
- /**
- * If BW allocation enabled on both DPIAs, then
- * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
- * otherwise HR BW = Estimated(bw alloc enabled dpia)
- */
- if ((link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
- (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
- total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
- link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
- } else if (link_dpia_primary->hpd_status &&
- link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
- } else if (link_dpia_secondary->hpd_status &&
- link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
- total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
- }
- break;
- }
- }
-
- return total_bw;
-}
-
/*
* Cleanup function for when the dpia is unplugged to reset struct
* and perform any required clean up
@@ -251,32 +181,40 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
{
- uint8_t requested_bw;
- uint32_t temp;
+ uint8_t request_reg_val;
+ uint32_t temp, request_bw;
- /* Error check whether request bw greater than allocated */
- if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
- DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n",
- __func__, link->link_index);
- req_bw = link->dpia_bw_alloc_config.estimated_bw;
+ if (link->dpia_bw_alloc_config.bw_granularity == 0) {
+ DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index);
+ return;
}
temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
- requested_bw = temp / Kbps_TO_Gbps;
+ request_reg_val = temp / Kbps_TO_Gbps;
/* Always round up to account for any fractional remainder */
if (temp % Kbps_TO_Gbps)
- ++requested_bw;
+ ++request_reg_val;
- /* Error check whether requested and allocated are equal */
- req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
- DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n",
- __func__, link->link_index);
+ request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ if (request_bw > link->dpia_bw_alloc_config.estimated_bw) {
+ DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!",
+ __func__, link->link_index,
+ req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw);
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ request_reg_val = temp / Kbps_TO_Gbps;
+ if (temp % Kbps_TO_Gbps)
+ ++request_reg_val;
}
+ link->dpia_bw_alloc_config.allocated_bw = request_bw;
+ DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw);
+
core_link_write_dpcd(link, REQUESTED_BW,
- &requested_bw,
+ &request_reg_val,
sizeof(uint8_t));
}
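
The conversion in link_dpia_send_bw_alloc_request() expresses a kbps request in DPCD units of (1 Gbps / bw_granularity), rounding up so the grant never undershoots the request. A standalone worked example, assuming a granularity factor of 4 (0.25 Gbps units):

#include <stdint.h>
#include <stdio.h>

#define KBPS_TO_GBPS (1000 * 1000)

static uint8_t bw_kbps_to_reg_units(uint32_t req_bw_kbps, uint32_t granularity)
{
	uint32_t temp = req_bw_kbps * granularity;
	uint8_t units = temp / KBPS_TO_GBPS;

	if (temp % KBPS_TO_GBPS)   /* round up any fractional unit */
		units++;
	return units;
}

int main(void)
{
	uint32_t gran = 4;          /* 0.25 Gbps units */
	uint32_t req = 8100000;     /* 8.1 Gbps requested */
	uint32_t reg = bw_kbps_to_reg_units(req, gran);

	/* 8100000 * 4 / 1000000 = 32.4 -> register value 33,
	 * i.e. 33 * 250000 = 8250000 kbps actually granted */
	printf("reg=%u granted=%u kbps\n", reg, reg * (KBPS_TO_GBPS / gran));
	return 0;
}
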
@@ -304,14 +242,16 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
ret = true;
- /*
- * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
- * DPIA. CM release preallocation only when allocation is complete. Do zero alloc
- * to make the CM to release preallocation and update estimated BW correctly for
- * all DPIAs per host router
- */
- // TODO: Zero allocation can be removed once the MSFT CM fix has been released
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
} else
DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
}
@@ -329,19 +269,17 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
__func__, link->link_index);
} else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
} else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
@@ -374,9 +312,13 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
- DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
+ DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d",
__func__, link->link_index, link->hpd_status,
- link->dpia_bw_alloc_config.allocated_bw, req_bw);
+ link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.estimated_bw,
+ req_bw);
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
@@ -384,73 +326,116 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
}
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link)
{
- bool ret = true;
- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
- uint8_t lowest_dpia_index, i, hr_index;
+ uint32_t link_dp_overhead = 0;
- if (!num_dpias || num_dpias > MAX_DPIA_NUM)
- return ret;
+ if ((link->type == dc_connection_mst_branch) &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
+ * MST overhead is 1/64 of link bandwidth (excluding any overhead)
+ */
+ const struct dc_link_settings *link_cap = dc_link_get_link_cap(link);
- lowest_dpia_index = get_lowest_dpia_index(link[0]);
+ if (link_cap) {
+ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+ (uint32_t)link_cap->lane_count *
+ LINK_RATE_REF_FREQ_IN_KHZ * 8;
+ link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT)
+ + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0);
+ }
+ }
- /* get total Host Router BW with granularity for the given modes */
- for (i = 0; i < num_dpias; ++i) {
- int granularity_Gbps = 0;
- int bw_granularity = 0;
+ return link_dp_overhead;
+}
- if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
- continue;
+/*
+ * Aggregates the DPIA bandwidth usage for the respective USB4 router,
+ * then validates that the required bandwidth fits within the router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeds
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count)
+{
+ uint32_t granularity_Gbps;
+ const struct dc_link *link;
+ uint32_t link_bw_granularity;
+ uint32_t link_required_bw;
+ struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 };
+ uint8_t i;
+ bool is_success = true;
+ uint8_t router_count = 0;
+
+ if ((dpia_link_sets == NULL) || (count == 0))
+ return is_success;
+
+ // Iterate through each DP tunneling link (DPIA).
+ // Aggregate its bandwidth requirements onto the respective USB4 router.
+ for (i = 0; i < count; i++) {
+ link = dpia_link_sets[i].link;
+ link_required_bw = dpia_link_sets[i].required_bw;
+ const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings;
+
+ if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0)
+ break;
- if (link[i]->link_index < lowest_dpia_index)
- continue;
+ if (link->type == dc_connection_mst_branch)
+ link_required_bw += link_dpia_get_dp_overhead(link);
- granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
- bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
- ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
+ granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity);
+ link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps +
+ ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0);
- hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
- bw_needed_per_hr[hr_index] += bw_granularity;
- }
+ // Find or add the USB4 router associated with the current DPIA link
+ for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) {
+ if (router_sets[j].is_valid == false) {
+ router_sets[j].is_valid = true;
+ router_sets[j].cm_id = dp_tunnel_settings->cm_id;
+ router_count++;
+ }
- /* validate against each Host Router max BW */
- for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
- if (bw_needed_per_hr[hr_index]) {
- host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
- if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
- ret = false;
+ if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) {
+ uint32_t remaining_bw =
+ dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw;
+
+ router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw;
+
+ if (remaining_bw > router_sets[j].remaining_bw)
+ router_sets[j].remaining_bw = remaining_bw;
+
+ // Get the max estimated BW within the same CM_ID
+ if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw)
+ router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw;
+
+ router_sets[j].required_bw += link_bw_granularity;
+ router_sets[j].dpia_count++;
break;
}
}
}
- return ret;
-}
+ // Validate bandwidth for each unique router found.
+ for (i = 0; i < router_count; i++) {
+ uint32_t total_bw = 0;
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
-{
- int dp_overhead = 0, link_mst_overhead = 0;
+ if (router_sets[i].is_valid == false)
+ break;
- if (!link_dp_is_bw_alloc_available(link))
- return dp_overhead;
+ // Determine the total available bandwidth for the current router based on aggregated data
+ if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0))
+ total_bw = router_sets[i].estimated_bw;
+ else
+ total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw;
- /* if its mst link, add MTPH overhead */
- if ((link->type == dc_connection_mst_branch) &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
- * MST overhead is 1/64 of link bandwidth (excluding any overhead)
- */
- const struct dc_link_settings *link_cap =
- dc_link_get_link_cap(link);
- uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
- (uint32_t)link_cap->lane_count *
- LINK_RATE_REF_FREQ_IN_KHZ * 8;
- link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+ if (router_sets[i].required_bw > total_bw) {
+ is_success = false;
+ break;
+ }
}
- /* add all the overheads */
- dp_overhead = link_mst_overhead;
-
- return dp_overhead;
+ return is_success;
}
+
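
For the MST overhead computed by link_dpia_get_dp_overhead() above, a worked standalone example: with DC's 27 MHz link-rate reference and an 8b/10b HBR2 x4 link (rate multiplier 20), the payload bandwidth is 17,280,000 kbps and one of the 64 MTP time slots goes to the MTPH:

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000
#define MST_TIME_SLOT_COUNT 64

int main(void)
{
	uint32_t link_rate = 20;    /* HBR2: 20 * 0.27 Gbps = 5.4 Gbps per lane */
	uint32_t lane_count = 4;

	uint32_t link_bw_kbps = link_rate * lane_count *
				LINK_RATE_REF_FREQ_IN_KHZ * 8;
	uint32_t overhead = link_bw_kbps / MST_TIME_SLOT_COUNT +
			    ((link_bw_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0);

	/* 17280000 kbps of payload -> 270000 kbps reserved for the MTPH */
	printf("link_bw=%u kbps, mst_overhead=%u kbps\n", link_bw_kbps, overhead);
	return 0;
}
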
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 801965b5f9a4..41efcb3e44e2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -28,10 +28,6 @@
#include "link.h"
-/* Number of Host Routers per motherboard is 2 */
-#define MAX_HR_NUM 2
-/* Number of DPIA per host router is 2 */
-#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
/*
* Host Router BW type
@@ -42,6 +38,16 @@ enum bw_type {
HOST_ROUTER_BW_INVALID,
};
+struct usb4_router_validation_set {
+ bool is_valid;
+ uint8_t cm_id;
+ uint8_t dpia_count;
+ uint32_t required_bw;
+ uint32_t allocated_bw;
+ uint32_t estimated_bw;
+ uint32_t remaining_bw;
+};
+
/*
* Enable USB4 DP BW allocation mode
*
@@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
/*
- * Handle the validation of total BW here and confirm that the bw used by each
- * DPIA doesn't exceed available BW for each host router (HR)
- *
- * @link[]: array of link pointer to all possible DPIA links
- * @bw_needed[]: bw needed for each DPIA link based on timing
- * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
- *
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
- */
-bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
-
-/*
* Obtain all the DP overheads in dp tunneling for the dpia link
*
* @link: pointer to the dc_link struct instance
*
* return: DP overheads in DP tunneling
*/
-int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+uint32_t link_dpia_get_dp_overhead(const struct dc_link *link);
/*
* Handle DP BW allocation status register
@@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status);
+/*
+ * Aggregates the DPIA bandwidth usage for the respective USB4 router.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeds
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index da74c2b5854f..e7927b8f5ba3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds && !link->dpcd_caps.panel_luminance_control)
+ return true;
+
// use internal backlight control if dmub capabilities are not present
if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
!link->dc->caps.dmub_caps.aux_backlight_support) {
@@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
target_luminance = (struct target_luminance_value *)&backlight_millinits;
+ // Make sure we disable AMD ABC first.
+ core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, sizeof(uint8_t));
+ if (backlight_enable) {
+ backlight_enable = 0;
+ core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_enable, 1);
+ }
+
core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&backlight_enable, sizeof(uint8_t));
@@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
uint8_t backlight_control = isHDR ? 1 : 0;
+ uint8_t backlight_enable = 0;
+
// OLEDs have no PWM, they can only use AUX
if (link->dpcd_sink_ext_caps.bits.oled == 1)
backlight_control = 1;
+ // Make sure we disable VESA ABC first.
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(uint8_t));
+
+ if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) {
+ backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+ core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable, sizeof(backlight_enable));
+ }
+
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
@@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return false;
if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
sizeof(union dpcd_source_backlight_get)))
@@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
+ if (link->is_dds)
+ return true;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
&backlight_enable, 1) != DC_OK)
return false;
@@ -916,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
// TODO: Handle mux change case if force_static is set
// If force_static is set, just change the replay_allow_active state directly
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
link->replay_settings.replay_allow_active = *allow_active;
}
@@ -1173,6 +1201,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+bool is_smartmux_suported(struct dc_link *link)
+{
+ if (link->dc->caps.is_apu)
+ return false;
+ if (!link->dc->config.smart_mux_version)
+ return false;
+
+ return true;
+}
+
static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
struct link_resource *link_res, bool enable)
{
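
Both new read-modify-write blocks in edp_set_backlight_level_nits() follow the same shape: read the competing backlight path's DPCD enable, clear it only if set, then write it back, so the AMD and VESA control paths are never active together. The bare pattern, with placeholder register and bit values:

#include <stdint.h>
#include <stdio.h>

#define OTHER_PATH_ENABLE_BIT 0x02u

static uint8_t dpcd_reg = 0x03;   /* pretend read: other path enabled */

int main(void)
{
	uint8_t val = dpcd_reg;                    /* core_link_read_dpcd() */

	if (val & OTHER_PATH_ENABLE_BIT) {         /* only write when needed */
		val &= (uint8_t)~OTHER_PATH_ENABLE_BIT;
		dpcd_reg = val;                    /* core_link_write_dpcd() */
	}

	printf("reg now %#x\n", dpcd_reg);
	return 0;
}
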
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index bcfa6ac5d4e7..4a475d5b9dde 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,6 +30,7 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool is_smartmux_suported(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
index 1e2e66508192..5402c3529f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o
AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401))
AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401)
-endif
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index b4cea2b8cb2a..6f0e017a8ae2 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -30,7 +30,6 @@
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
-#include "dcn401/dcn401_mpc.h"
#define REG(reg)\
mpc30->mpc_regs->reg
@@ -879,7 +878,7 @@ void mpc32_set3dlut_ram10(
}
-static void mpc32_set_3dlut_mode(
+void mpc32_set_3dlut_mode(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
@@ -1022,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
- .set_movable_cm_location = mpc401_set_movable_cm_location,
- .populate_lut = mpc401_populate_lut,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
index 9622518826c9..8c9b20bcca85 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
@@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram(
enum dc_lut_mode mode,
bool is_color_channel_12bits,
uint32_t mpcc_id);
+
+void mpc32_set_3dlut_mode(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17,
+ uint32_t mpcc_id);
+
#endif //__DC_MPCC_DCN32_H__
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index 98cf0cbd59ba..f3fb3fe13757 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -294,7 +294,7 @@ void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
}
-static void program_gamut_remap(
+void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
const uint16_t *regval,
@@ -426,7 +426,7 @@ void mpc401_set_gamut_remap(
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
/* Bypass / Disable if type is bypass or hw */
- program_gamut_remap(mpc, mpcc_id, NULL,
+ mpc_program_gamut_remap(mpc, mpcc_id, NULL,
adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0);
} else {
struct fixed31_32 arr_matrix[12];
@@ -460,12 +460,12 @@ void mpc401_set_gamut_remap(
else
mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2;
- program_gamut_remap(mpc, mpcc_id, arr_reg_val,
+ mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val,
adjust->mpcc_gamut_remap_block_id, mode_select);
}
}
-static void read_gamut_remap(struct mpc *mpc,
+void mpc_read_gamut_remap(struct mpc *mpc,
int mpcc_id,
uint16_t *regval,
enum mpcc_gamut_remap_id gamut_remap_block_id,
@@ -561,9 +561,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
struct mpc_grph_gamut_adjustment *adjust)
{
uint16_t arr_reg_val[12] = {0};
- uint32_t mode_select;
+ uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0;
- read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
+ mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index 8e35ebc603a9..eb0c68d0b0c7 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -241,6 +241,19 @@ void mpc401_update_3dlut_fast_load_select(
int mpcc_id,
int hubp_idx);
+void mpc_program_gamut_remap(
+ struct mpc *mpc,
+ unsigned int mpcc_id,
+ const uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ enum mpcc_gamut_remap_mode_select mode_select);
+
+void mpc_read_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ uint16_t *regval,
+ enum mpcc_gamut_remap_id gamut_remap_block_id,
+ uint32_t *mode_select);
+
void mpc401_update_3dlut_fast_load_select(
struct mpc *mpc,
int mpcc_id,
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index f2ba76c1e0c0..782316348941 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -31,6 +31,7 @@
#include <linux/kgdb.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 84f73fdb0f95..3a51be63f020 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -839,7 +839,7 @@ static enum dc_status build_mapped_resource(
static enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index f3d5baac11bf..cccde5a6f3cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -963,7 +963,7 @@ static enum dc_status build_mapped_resource(
static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 4225cae68c10..164ba796f64c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -886,7 +886,7 @@ static enum dc_status build_mapped_resource(
enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 6221d749246d..3efc4c55d2d2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -45,7 +45,7 @@ enum dc_status dce112_validate_with_context(
enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index d9ffdded5ce1..58b59d52dc9d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -866,7 +866,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
static enum dc_status dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index bd5811f97531..3e8b0ac11d90 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -872,7 +872,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
static enum dc_status dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index be4ade0853e9..652c05c35494 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -1129,12 +1129,12 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
DC_FP_START();
- voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
+ voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode);
DC_FP_END();
return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
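
The conversion is mechanical: every `if (fast_validate)` becomes `if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)`. One plausible mapping from the old flag onto the new enum, expressed as a hypothetical shim (the series converts callers directly and contains no such helper):

/* Hypothetical helper illustrating the old-to-new mapping; not part of
 * this diff. fast_validate == true corresponds to a non-programming mode.
 */
static inline enum dc_validate_mode to_validate_mode(bool fast_validate)
{
	return fast_validate ? DC_VALIDATE_MODE_ONLY
			     : DC_VALIDATE_MODE_AND_PROGRAMMING;
}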
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 3405be07f5e3..f9cbdad3ef37 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -2125,7 +2125,7 @@ validate_out:
}
enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
@@ -2135,7 +2135,7 @@ enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
@@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
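
Each resource_construct also gains an explicit `dc->caps.max_odm_combine_factor` (2 on the DCN2x pools, 4 from DCN30 onward per the later hunks). A minimal sketch of a consumer, with a hypothetical helper name not taken from this diff:

/* Hypothetical clamp against the new cap; purely illustrative. */
static inline int dc_clamp_odm_combine(const struct dc *dc, int requested)
{
	if (requested > dc->caps.max_odm_combine_factor)
		return dc->caps.max_odm_combine_factor;
	return requested;
}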
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index c0e062c7407d..e997d35a8b86 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
@@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 43fa2cb117f3..e4a1338d21e0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 9ab01b65b177..918742a42ded 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@@ -924,7 +924,7 @@ validate_out:
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool voltage_supported;
display_e2e_pipe_params_st *pipes;
@@ -934,7 +934,7 @@ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *c
return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes);
DC_FP_END();
kfree(pipes);
@@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 2;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index f7ecc002c2f7..a017fd9854d1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
@@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw(
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* _DCN21_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index f631ae34e320..895349d9ca07 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only)
{
bool out = false;
@@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw(
context->bw_ctx.dml.vba.VoltageLevel = 0;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw(
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (!fast_validate || !allow_self_refresh_only) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw(
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
if (allow_self_refresh_only &&
- (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
* If mode is unsupported or there's still no p-state support
@@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw(
}
if (repopulate_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -2037,7 +2037,7 @@ void dcn30_calculate_wm_and_dlg(
enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -2055,7 +2055,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
if (pipe_cnt == 0)
@@ -2066,7 +2066,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -2586,6 +2586,8 @@ static bool dcn30_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 689d9bdace81..2c967fe55712 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -57,14 +57,14 @@ unsigned int dcn30_calc_max_scaled_time(
unsigned int urgent_watermark);
enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate,
+ enum dc_validate_mode validate_mode,
bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
@@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context(
int dcn30_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dcn30_acquire_post_bldn_3dlut(
struct resource_context *res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 121a86a59833..82a205a7c25c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 012c5fd52cb1..3345068a878c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -1481,6 +1481,8 @@ static bool dcn302_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index a8d0b4686f9a..3479e1eab4cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 51ca0b2959fc..3ed7f50554e2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format)
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.gpuvm = 1;
@@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1649,7 +1649,7 @@ int dcn31_populate_dml_pipes_from_context(
bool upscaled = false;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1760,7 +1760,7 @@ dcn31_set_mcif_arb_params(struct dc *dc,
enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1778,19 +1778,19 @@ enum dc_status dcn31_validate_bandwidth(struct dc *dc,
goto validate_fail;
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Force DC_VALIDATE_MODE_AND_PROGRAMMING when no pipes so min dcfclk gets set in calculate_wm_and_dlg
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -2202,6 +2204,8 @@ static bool dcn31_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
@@ -2231,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool(
kfree(pool);
return NULL;
}
+
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output)
+{
+ struct dc_state *state = link->dc->current_state;
+ int i;
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]);
+
+ for (i = 0; i < pipe_count; i++) {
+ link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]);
+
+ // Setup audio
+ if (pipes[i].stream_res.audio != NULL)
+ build_audio_output(state, &pipes[i], &audio_output[i]);
+ }
+#else
+ /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
+ * but the above will not compile on architectures without an FPU.
+ */
+ DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__);
+ ASSERT(0);
+#endif
+
+ return DC_OK;
+}
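
The new `update_dc_state_for_encoder_switch` hook recomputes pixel-rate dividers, pixel-clock parameters, and audio outputs for every pipe on a link so the link code can retrain across a DP1<-->DP2 encoder switch. A hedged sketch of a caller going through the resource funcs table; the actual call site is outside this diff and the local variable names are assumptions:

/* Hedged sketch of a caller; link_settings, pipe_count, pipes and
 * audio_output are assumed locals of the link-retraining path.
 */
if (link->dc->res_pool->funcs->update_dc_state_for_encoder_switch)
	link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(
			link, &link_settings, pipe_count, pipes, audio_output);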
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index dd82815d7efe..c32c85ef0ba4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -39,7 +39,7 @@ struct dcn31_resource_pool {
enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg(
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void
dcn31_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
@@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool(
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context);
+enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
+
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
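
A recurring pattern in these hunks: DML helpers that touch the FPU run between DC_FP_START()/DC_FP_END(), and FPU-only helpers such as dcn31x_populate_dml_pipes_from_context() assert that with dc_assert_fp_enabled(). A conforming caller, mirroring the dcn31 hunks above:

/* Kernel code may not use the FPU outside an explicit region, so DC
 * brackets every DML call; this condenses the dcn31 hunks above.
 */
DC_FP_START();
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();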
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 8383e2e59be5..de708fdc1e80 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1667,12 +1667,12 @@ static struct clock_source *dcn31_clock_source_create(
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt;
DC_FP_START();
- pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
DC_FP_END();
return pipe_cnt;
@@ -1696,7 +1696,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1715,19 +1715,19 @@ enum dc_status dcn314_validate_bandwidth(struct dc *dc,
DC_FP_START();
// do not support self refresh only
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ // Force DC_VALIDATE_MODE_AND_PROGRAMMING when no pipes so min dcfclk gets set in calculate_wm_and_dlg
if (pipe_cnt == 0)
- fast_validate = false;
+ validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1779,7 +1779,9 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static struct clock_source *dcn30_clock_source_create(
@@ -2117,6 +2119,8 @@ static bool dcn314_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index f8ba531d6342..ac9bb7f097d5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -41,7 +41,7 @@ struct dcn314_resource_pool {
enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 6c2bb3f63be1..82cc78c291d8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context(
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn315_resource_construct(
@@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 568094827212..636110e48d01 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format)
static int dcn316_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context(
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn316_resource_construct(
@@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index bb0dae0be5b8..9917b366f00c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1742,7 +1742,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
}
}
-static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate)
+static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -1767,7 +1767,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
goto validate_fail;
DC_FP_START();
- out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode);
DC_FP_END();
if (pipe_cnt == 0)
@@ -1778,7 +1778,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
@@ -1809,7 +1809,7 @@ validate_out:
enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
unsigned int i;
enum dc_status status;
@@ -1827,11 +1827,11 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
- if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
/* check new stream configuration still supports cursor if subvp used */
for (i = 0; i < context->stream_count; i++) {
stream = context->streams[i];
@@ -1846,14 +1846,14 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
};
}
- if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
/* attempt to validate again with subvp disabled due to cursor */
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
return status;
@@ -1862,7 +1862,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -1878,7 +1878,7 @@ int dcn32_populate_dml_pipes_from_context(
int num_subvp_none = 0;
int odm_slice_count;
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
/* For single display subvp, look for subvp main so if we have phantom
* pipe, we can set odm policy to match main pipe
@@ -1960,7 +1960,7 @@ int dcn32_populate_dml_pipes_from_context(
/* Only populate DML input with subvp info for full updates.
* This is just a workaround -- needs a proper fix.
*/
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
@@ -2061,21 +2061,15 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -2257,7 +2251,7 @@ static bool dcn32_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -2276,6 +2270,7 @@ static bool dcn32_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2505,6 +2500,8 @@ static bool dcn32_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2519,7 +2516,6 @@ static bool dcn32_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2551,6 +2547,10 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
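
dcn32 (and dcn321/dcn401 below) stop copying `dml2_options` into a scratch `dml2_tmp` on every bounding-box update; instead a second options struct, `dml2_dc_power_options`, is filled once at construct time with `use_clock_dc_limits = true` and handed to `dml2_reinit()` for the DC-power DML2 instance. The idiom, condensed from the hunks above:

/* Initialize the DC-limited options once at construct time... */
memcpy(&dc->dml2_dc_power_options, &dc->dml2_options,
       sizeof(struct dml2_configuration_options));
dc->dml2_dc_power_options.use_clock_dc_limits = true;

/* ...then each reinit picks the options matching its DML2 instance. */
dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
dml2_reinit(dc, &dc->dml2_dc_power_options,
	    &dc->current_state->bw_ctx.dml2_dc_power_source);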
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index d60ed77eda80..82f966cf4ed2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -100,12 +100,12 @@ void dcn32_add_phantom_pipes(struct dc *dc,
enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 7db1f7a5613f..061c0907d802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -1761,8 +1755,8 @@ static bool dcn321_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
@@ -1780,6 +1774,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
@@ -2004,6 +1999,8 @@ static bool dcn321_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2018,7 +2015,6 @@ static bool dcn321_resource_construct(
}
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
@@ -2046,6 +2042,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
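
dcn32 and dcn321 (and the dcn35/dcn351/dcn36 hunks below) clear `dpp.hw_3d_lut` while setting the new `mpc.preblend` capability, i.e. the 3D LUT is now advertised in the MPC pre-blend block rather than the DPP. A hypothetical helper a color path might use to pick the LUT location; this is not taken from the diff:

/* Hypothetical capability check (illustration only): prefer the MPC
 * pre-blend 3D LUT now that the DPP one is no longer advertised.
 */
static inline bool use_mpc_preblend_3dlut(const struct dc *dc)
{
	return dc->caps.color.mpc.preblend && !dc->caps.color.dpp.hw_3d_lut;
}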
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index e01aa2f2e13e..8475c6eec547 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1734,15 +1734,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
@@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn35_resource_construct(
@@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1893,6 +1895,8 @@ static bool dcn35_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
@@ -2151,12 +2157,13 @@ static bool dcn35_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 4ebe4e00a4f8..0971c0f74186 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1714,15 +1714,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
@@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn351_resource_construct(
@@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1865,6 +1867,8 @@ static bool dcn351_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
@@ -2122,13 +2128,14 @@ static bool dcn351_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index db36b8f9ce65..8bae7fcedc22 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1715,15 +1715,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
out = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ validate_mode);
- if (fast_validate)
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
@@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch,
+ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params
};
static bool dcn36_resource_construct(
@@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
@@ -1866,6 +1868,8 @@ static bool dcn36_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
+
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
@@ -2124,12 +2130,13 @@ static bool dcn36_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index f420c4dafa03..b3988e38d0a6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -70,7 +70,6 @@
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml/dcn401/dcn401_fpu.h"
#include "dc_state_priv.h"
@@ -1608,10 +1607,6 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-
/* re-calculate the available MALL size if required */
if (bw_params->num_channels > 0) {
dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
@@ -1622,15 +1617,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_START();
- dcn401_update_bw_bounding_box_fpu(dc, bw_params);
-
- dml2_opt->use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
- dml2_opt->use_clock_dc_limits = true;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
- dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+ dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
}
@@ -1644,7 +1635,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
unsigned int i;
enum dc_status status = DC_OK;
@@ -1662,9 +1653,9 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
- if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) {
/* check new stream configuration still supports cursor if subvp used */
for (i = 0; i < context->stream_count; i++) {
stream = context->streams[i];
@@ -1679,12 +1670,12 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
};
}
- if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
/* attempt to validate again with subvp disabled due to cursor */
if (dc->debug.using_dml2)
status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
return status;
@@ -1957,8 +1948,30 @@ static bool dcn401_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.color.mpc.preblend = true;
dc->config.use_spl = true;
dc->config.prefer_easf = true;
+
+ dc->config.dcn_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.sdr_rgb_max = 1750;
+ dc->config.dcn_sharpness_range.sdr_rgb_mid = 750;
+ dc->config.dcn_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500;
+
+ dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250;
+ dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500;
+ dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750;
+ dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500;
+
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
@@ -2177,6 +2190,8 @@ static bool dcn401_resource_construct(
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
+ dc->caps.max_odm_combine_factor = 4;
+
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
@@ -2195,7 +2210,6 @@ static bool dcn401_resource_construct(
dc->config.sdpif_request_limit_words_per_umc = 16;
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
- dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
@@ -2228,6 +2242,10 @@ static bool dcn401_resource_construct(
/* SPL */
dc->caps.scl_caps.sharpener_support = true;
+ /* init DC limited DML2 options */
+ memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
+ dc->dml2_dc_power_options.use_clock_dc_limits = true;
+
return true;
create_fail:
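
dcn401 gains default and override sharpness ranges as {min, mid, max} triplets per color space. The diff does not show how SPL consumes them; one plausible reading, stated only as an assumption, is a piecewise-linear mapping from a user sharpness level onto the triplet:

/* Hypothetical piecewise-linear mapping of a 0..100 sharpness level onto
 * a {min, mid, max} triplet; the real consumer is SPL code outside this
 * diff, so treat this purely as an illustration of the triplet's shape.
 */
static int sharpness_from_level(int level, int min, int mid, int max)
{
	if (level <= 50)
		return min + (mid - min) * level / 50;
	return mid + (max - mid) * (level - 50) / 50;
}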
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index dc52a30991af..2ae6831c31ef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -24,7 +24,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index e0008c5f08ad..55b929ca7982 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- if (use_recout_width_aligned) {
+ if (spl_in->basic_in.custom_width != 0) {
+ mpc_rec.width = spl_in->basic_in.custom_width;
+ mpc_rec.x = spl_in->basic_in.custom_x;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ } else if (use_recout_width_aligned) {
mpc_rec.width = recout_width_align;
if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
mpc_rec.width = plane_clip_rec->width % recout_width_align;
@@ -219,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
/* extra pixels in the division remainder need to go to pipes after
* the extra pixel index minus one(epimo) defined here as:
*/
- if (mpc_slice_idx > epimo) {
+ if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) {
mpc_rec.x += mpc_slice_idx - epimo - 1;
mpc_rec.width += 1;
}
@@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
odm_rec.x = odm_slice_width * odm_slice_idx;
odm_rec.width = is_last_odm_slice ?
- /* last slice width is the reminder of h_active */
- h_active - odm_slice_width * (odm_slice_count - 1) :
- /* odm slice width is the floor of h_active / count */
- odm_slice_width;
+ /* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
odm_rec.y = 0;
odm_rec.height = v_active;
@@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale)
+ struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps,
+ bool is_subsampled)
{
bool check_max_downscale = false;
@@ -945,14 +952,15 @@ static void spl_get_taps_non_adaptive_scaler(
SPL_ASSERT(check_max_downscale);
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
+
}
/* Calculate optimal number of taps */
@@ -965,15 +973,13 @@ static bool spl_get_optimal_number_of_taps(
unsigned int max_taps_y, max_taps_c;
unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
- bool always_scale = spl_in->basic_out.always_scale;
+ bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
-
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -982,7 +988,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -997,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps(
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
- if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
+ if (skip_easf) {
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
+ }
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -1124,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps(
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
-
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
@@ -1149,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps(
if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
+
}
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 36a284305a70..23d254dea18f 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -460,6 +460,8 @@ struct basic_in {
enum spl_color_space color_space; // Color Space
unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr
bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this?
+ int custom_width; // Width for non-standard segmentation - used when != 0
+ int custom_x; // Start x for non-standard segmentation - used when custom_width != 0
};
// Basic output information
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 3f3fa1b6a69e..0bafb6710761 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -129,7 +129,9 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
+ DMUB_WINDOW_LSDMA_BUFFER,
DMUB_WINDOW_TOTAL,
};
@@ -355,6 +357,7 @@ struct dmub_diagnostic_data {
uint8_t is_traceport_en : 1;
uint8_t is_cw0_enabled : 1;
uint8_t is_cw6_enabled : 1;
+ uint8_t is_pwait : 1;
};
struct dmub_srv_inbox {
@@ -539,6 +542,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ struct dmub_fb ib_mem_gart;
volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
@@ -576,6 +580,7 @@ struct dmub_srv {
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
+ struct dmub_fb lsdma_rb_fb;
};
/**
@@ -602,14 +607,6 @@ struct dmub_notification {
};
};
-/* enum dmub_ips_mode - IPS mode identifier */
-enum dmub_ips_mode {
- DMUB_IPS_MODE_IPS1_MAX = 0,
- DMUB_IPS_MODE_IPS2,
- DMUB_IPS_MODE_IPS1_RCG,
- DMUB_IPS_MODE_IPS1_ONO2_ON
-};
-
/**
* DMUB firmware version helper macro - useful for checking if the version
* of a firmware to know if feature or functionality is supported or present.
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b66bd10cdc9b..c587b3441e07 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -104,6 +104,14 @@
*/
#define DMUB_MAX_FPO_STREAMS 4
+/* Define to ensure that the "common" members always appear in the same
+ * order in different structs, for backward-compatibility purposes
+ */
+#define COMMON_STREAM_STATIC_SUB_STATE \
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy; \
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp; \
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+
/* Maximum number of streams on any ASIC. */
#define DMUB_MAX_STREAMS 6
@@ -291,6 +299,31 @@ union dmub_addr {
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+
+/* Flattened structure containing SOC BB parameters stored in the VBIOS
+ * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters.
+ * This also prevents alignment issues when new parameters are added to the SoC BB.
+ * The following parameters should be added since these values can't be obtained elsewhere:
+ * -dml2_soc_power_management_parameters
+ * -dml2_soc_vmin_clock_limits
+ */
+struct dmub_soc_bb_params {
+ uint32_t dram_clk_change_blackout_ns;
+ uint32_t dram_clk_change_read_only_ns;
+ uint32_t dram_clk_change_write_only_ns;
+ uint32_t fclk_change_blackout_ns;
+ uint32_t g7_ppt_blackout_ns;
+ uint32_t stutter_enter_plus_exit_latency_ns;
+ uint32_t stutter_exit_latency_ns;
+ uint32_t z8_stutter_enter_plus_exit_latency_ns;
+ uint32_t z8_stutter_exit_latency_ns;
+ uint32_t z8_min_idle_time_ns;
+ uint32_t type_b_dram_clk_change_blackout_ns;
+ uint32_t type_b_ppt_blackout_ns;
+ uint32_t vmin_limit_dispclk_khz;
+ uint32_t vmin_limit_dcfclk_khz;
+ uint32_t g7_temperature_read_blackout_ns;
+};
#pragma pack(pop)
/**
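
Since the VBIOS stores these latencies in nanoseconds while the DML2 bounding box consumes microseconds, a conversion is needed on the way in. A minimal sketch of that conversion, assuming the field names of dml2_soc_power_management_parameters from the DML2 headers (the helper name and the exact field set here are illustrative, not taken from this patch):

static void dmub_apply_soc_bb_params(const struct dmub_soc_bb_params *bb,
				     struct dml2_soc_power_management_parameters *pmp)
{
	/* VBIOS stores ns; DML2 expects us. */
	pmp->dram_clk_change_blackout_us = bb->dram_clk_change_blackout_ns / 1000.0;
	pmp->fclk_change_blackout_us = bb->fclk_change_blackout_ns / 1000.0;
	pmp->g7_ppt_blackout_us = bb->g7_ppt_blackout_ns / 1000.0;
	pmp->stutter_enter_plus_exit_latency_us =
		bb->stutter_enter_plus_exit_latency_ns / 1000.0;
	pmp->stutter_exit_latency_us = bb->stutter_exit_latency_ns / 1000.0;
	pmp->z8_stutter_enter_plus_exit_latency_us =
		bb->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
	pmp->z8_stutter_exit_latency_us = bb->z8_stutter_exit_latency_ns / 1000.0;
}
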
@@ -757,11 +790,29 @@ enum dmub_ips_rcg_disable_type {
DMUB_IPS_RCG_DISABLE = 3
};
+enum dmub_ips_in_vpb_disable_type {
+ DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour
+ DMUB_IPS_VPB_DISABLE_ALL = 1,
+ DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2,
+ DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG
+};
+
#define DMUB_IPS1_ALLOW_MASK 0x00000001
#define DMUB_IPS2_ALLOW_MASK 0x00000002
#define DMUB_IPS1_COMMIT_MASK 0x00000004
#define DMUB_IPS2_COMMIT_MASK 0x00000008
+enum dmub_ips_comand_type {
+ /**
+ * Start/stop IPS residency measurements for a given IPS mode
+ */
+ DMUB_CMD__IPS_RESIDENCY_CNTL = 0,
+ /**
+ * Query IPS residency information for a given IPS mode
+ */
+ DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1,
+};
+
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
@@ -831,7 +882,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
-union dmub_shared_state_ips_fw_signals {
+ union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
@@ -846,7 +897,7 @@ union dmub_shared_state_ips_fw_signals {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
-union dmub_shared_state_ips_driver_signals {
+ union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
@@ -856,7 +907,9 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips0_rcg : 1; /**< 1 is IPS0 RCG is allowed */
uint32_t allow_ips1_rcg : 1; /**< 1 is IPS1 RCG is allowed */
uint32_t allow_ips1z8 : 1; /**< 1 is IPS1 Z8 Retention is allowed */
- uint32_t reserved_bits : 24; /**< Reversed bits */
+ uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */
+ uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */
+ uint32_t reserved_bits : 22; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -1508,6 +1561,16 @@ enum dmub_cmd_type {
*/
DMUB_CMD__FUSED_IO = 89,
+ /**
+ * Command type used for all LSDMA commands.
+ */
+ DMUB_CMD__LSDMA = 90,
+
+ /**
+ * Command type use for all IPS commands.
+ */
+ DMUB_CMD__IPS = 91,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1918,6 +1981,121 @@ struct dmub_rb_cmd_fams2_flip {
struct dmub_fams2_flip_info flip_info;
};
+struct dmub_cmd_lsdma_data {
+ union {
+ struct lsdma_init_data {
+ union dmub_addr gpu_addr_base;
+ uint32_t ring_size;
+ } init_data;
+ struct lsdma_tiled_copy_data {
+ uint32_t src_addr_lo;
+ uint32_t src_addr_hi;
+ uint32_t dst_addr_lo;
+ uint32_t dst_addr_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t dst_width : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_swizzle_mode : 5;
+ uint32_t src_mip_max : 5;
+ uint32_t src_mip_id : 5;
+ uint32_t dst_mip_max : 5;
+ uint32_t dst_swizzle_mode : 5;
+ uint32_t dst_mip_id : 5;
+ uint32_t tmz : 1;
+ uint32_t dcc : 1;
+
+ uint32_t data_format : 6;
+ uint32_t padding1 : 4;
+ uint32_t dst_element_size : 3;
+ uint32_t num_type : 3;
+ uint32_t src_element_size : 3;
+ uint32_t write_compress : 2;
+ uint32_t cache_policy_dst : 2;
+ uint32_t cache_policy_src : 2;
+ uint32_t read_compress : 2;
+ uint32_t src_dim : 2;
+ uint32_t dst_dim : 2;
+ uint32_t max_uncom : 1;
+
+ uint32_t max_com : 2;
+ uint32_t padding : 30;
+ } tiled_copy_data;
+ struct lsdma_linear_copy_data {
+ uint32_t count : 30;
+ uint32_t cache_policy_dst : 2;
+
+ uint32_t tmz : 1;
+ uint32_t cache_policy_src : 2;
+ uint32_t padding : 29;
+
+ uint32_t src_lo;
+ uint32_t src_hi;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ } linear_copy_data;
+ struct lsdma_reg_write_data {
+ uint32_t reg_addr;
+ uint32_t reg_data;
+ } reg_write_data;
+ struct lsdma_pio_copy_data {
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ uint32_t src_lo;
+ uint32_t src_hi;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ } pio_copy_data;
+ struct lsdma_pio_constfill_data {
+ union {
+ struct {
+ uint32_t byte_count : 26;
+ uint32_t src_loc : 1;
+ uint32_t dst_loc : 1;
+ uint32_t src_addr_inc : 1;
+ uint32_t dst_addr_inc : 1;
+ uint32_t overlap_disable : 1;
+ uint32_t constant_fill : 1;
+ } fields;
+ uint32_t raw;
+ } packet;
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+ uint32_t data;
+ } pio_constfill_data;
+
+ uint32_t all[14];
+ } u;
+
+};
+
+struct dmub_rb_cmd_lsdma {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_lsdma_data lsdma_data;
+};
+
struct dmub_optc_state_v2 {
uint32_t v_total_min;
uint32_t v_total_max;
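
Together with the new DMUB_CMD__LSDMA type, these payloads let the driver hand copy and fill work to the firmware's LSDMA engine. A hedged sketch of filling a linear-copy command (the interpretation of count as a byte count is an assumption, and the submission step is omitted):

uint64_t src_gpu_addr, dst_gpu_addr;	/* placeholders: caller-provided GPU VAs */
uint32_t copy_bytes;
union dmub_rb_cmd cmd = {0};

cmd.lsdma.header.type = DMUB_CMD__LSDMA;
cmd.lsdma.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
cmd.lsdma.lsdma_data.u.linear_copy_data.count = copy_bytes;  /* assumed: bytes */
cmd.lsdma.lsdma_data.u.linear_copy_data.tmz = 0;
cmd.lsdma.lsdma_data.u.linear_copy_data.src_lo = lower_32_bits(src_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.src_hi = upper_32_bits(src_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.dst_lo = lower_32_bits(dst_gpu_addr);
cmd.lsdma.lsdma_data.u.linear_copy_data.dst_hi = upper_32_bits(dst_gpu_addr);
/* ... queue via the usual DMUB inbox submission path ... */
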
@@ -1949,6 +2127,28 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
+struct dmub_rect16 {
+ /**
+ * Dirty rect x offset.
+ */
+ uint16_t x;
+
+ /**
+ * Dirty rect y offset.
+ */
+ uint16_t y;
+
+ /**
+ * Dirty rect width.
+ */
+ uint16_t width;
+
+ /**
+ * Dirty rect height.
+ */
+ uint16_t height;
+};
+
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
@@ -2021,11 +2221,13 @@ union dmub_fams2_stream_static_sub_state {
}; //v0
union dmub_fams2_cmd_stream_static_sub_state {
- struct dmub_fams2_cmd_legacy_stream_static_state legacy;
- struct dmub_fams2_cmd_subvp_stream_static_state subvp;
- struct dmub_fams2_cmd_drr_stream_static_state drr;
+ COMMON_STREAM_STATIC_SUB_STATE
}; //v1
+union dmub_fams2_stream_static_sub_state_v2 {
+ COMMON_STREAM_STATIC_SUB_STATE
+}; //v2
+
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
uint32_t otg_vline_time_ns;
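
Because both unions now expand the same macro, v1 and v2 stay member-for-member identical, which a build-time assertion can pin down. A small optional check, not part of this patch:

static_assert(sizeof(union dmub_fams2_cmd_stream_static_sub_state) ==
	      sizeof(union dmub_fams2_stream_static_sub_state_v2),
	      "FAMS2 sub-state unions must stay layout-compatible");
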
@@ -2091,7 +2293,7 @@ struct dmub_fams2_cmd_stream_static_base_state {
struct dmub_fams2_stream_static_state_v1 {
struct dmub_fams2_cmd_stream_static_base_state base;
- union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ union dmub_fams2_stream_static_sub_state_v2 sub_state;
}; //v1
/**
@@ -2139,6 +2341,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
+struct dmub_fams2_config_v2 {
+ struct dmub_cmd_fams2_global_config global;
+ struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
+};
+
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2148,6 +2355,22 @@ struct dmub_rb_cmd_fams2 {
};
/**
+ * Indirect buffer descriptor
+ */
+struct dmub_ib_data {
+ union dmub_addr src; // location of indirect buffer in memory
+ uint16_t size; // indirect buffer size in bytes
+};
+
+/**
+ * DMUB rb command definition for commands passed over indirect buffer
+ */
+struct dmub_rb_cmd_ib {
+ struct dmub_cmd_header header;
+ struct dmub_ib_data ib_data;
+};
+
+/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
enum dmub_cmd_idle_opt_type {
@@ -2170,6 +2393,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
+
+ /**
+ * DCN notify to release HW.
+ */
+ DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2315,7 +2543,8 @@ struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */
uint8_t reserved1; /**< For future use */
- uint8_t reserved2[3]; /**< For future use */
+ uint8_t skip_phy_ssc_reduction; /**< 1 to skip PHY SSC reduction */
+ uint8_t reserved2[2]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
};
@@ -2933,6 +3162,7 @@ enum dmub_cmd_fams_type {
DMUB_CMD__FAMS2_CONFIG = 4,
DMUB_CMD__FAMS2_DRR_UPDATE = 5,
DMUB_CMD__FAMS2_FLIP = 6,
+ DMUB_CMD__FAMS2_IB_CONFIG = 7,
};
/**
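
The new sub-type pairs with struct dmub_rb_cmd_ib above: instead of pushing a full FAMS2 config through the ring, the driver stages it in the DMUB_WINDOW_IB_MEM region and sends only a descriptor. A hedged sketch (the parent command type and the staging flow are assumptions, not confirmed by this patch):

struct dmub_fams2_config_v2 config_v2;	/* filled elsewhere */
union dmub_rb_cmd cmd = {0};

/* Stage the config in the indirect-buffer window first. */
memcpy(dmub->ib_mem_gart.cpu_addr, &config_v2, sizeof(config_v2));

cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; /* assumed parent type */
cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
cmd.ib_fams2_config.ib_data.src.quad_part = dmub->ib_mem_gart.gpu_addr;
cmd.ib_fams2_config.ib_data.size = sizeof(config_v2);
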
@@ -3818,6 +4048,14 @@ struct dmub_cmd_replay_copy_settings_data {
*/
uint8_t digbe_inst;
/**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
* AUX HW instance.
*/
uint8_t aux_inst;
@@ -3861,6 +4099,11 @@ struct dmub_cmd_replay_copy_settings_data {
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
+
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -3916,6 +4159,18 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -4416,6 +4671,37 @@ enum dmub_cmd_abm_type {
DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
};
+/**
+ * LSDMA command sub-types.
+ */
+enum dmub_cmd_lsdma_type {
+ /**
+ * Initialize parameters for LSDMA.
+ * The ring buffer base address and size are provided here
+ */
+ DMUB_CMD__LSDMA_INIT_CONFIG = 0,
+ /**
+ * LSDMA copies data from source to destination linearly
+ */
+ DMUB_CMD__LSDMA_LINEAR_COPY = 1,
+ /**
+ * Send the tiled-to-tiled copy command
+ */
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ /**
+ * Send the poll reg write command
+ */
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ /**
+ * Send the pio copy command
+ */
+ DMUB_CMD__LSDMA_PIO_COPY = 4,
+ /**
+ * Send the pio constfill command
+ */
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
+};
+
struct abm_ace_curve {
/**
* @offsets: ACE curve offsets.
@@ -5621,6 +5907,59 @@ struct dmub_rb_cmd_assr_enable {
};
/**
+ * Current definition of "ips_mode" from driver
+ */
+enum ips_residency_mode {
+ IPS_RESIDENCY__IPS1_MAX,
+ IPS_RESIDENCY__IPS2,
+ IPS_RESIDENCY__IPS1_RCG,
+ IPS_RESIDENCY__IPS1_ONO2_ON,
+};
+
+#define NUM_IPS_HISTOGRAM_BUCKETS 16
+
+/**
+ * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats
+ */
+struct dmub_ips_residency_info {
+ uint32_t residency_millipercent;
+ uint32_t entry_counter;
+ uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
+ uint64_t total_time_us;
+ uint64_t total_inactive_time_us;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command.
+ */
+struct dmub_cmd_ips_residency_cntl_data {
+ uint8_t panel_inst;
+ uint8_t start_measurement;
+ uint8_t padding[2]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_residency_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_residency_cntl_data cntl_data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command.
+ */
+struct dmub_cmd_ips_query_residency_info_data {
+ union dmub_addr dest;
+ uint32_t size;
+ uint32_t ips_mode;
+ uint8_t panel_inst;
+ uint8_t padding[3]; // align to 4-byte boundary
+};
+
+struct dmub_rb_cmd_ips_query_residency_info {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_ips_query_residency_info_data info_data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
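
The two new IPS commands form a start/collect pair: residency_cntl toggles measurement, and query_residency_info asks the firmware to write a struct dmub_ips_residency_info into a driver-supplied buffer. A minimal sketch of the payloads (the submission path is omitted and dest_gpu_addr is a placeholder):

uint64_t dest_gpu_addr;			/* placeholder: driver-owned buffer */
union dmub_rb_cmd cmd = {0};

/* Start measuring IPS2 residency on panel 0. */
cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
cmd.ips_residency_cntl.cntl_data.panel_inst = 0;
cmd.ips_residency_cntl.cntl_data.start_measurement = 1;

/* ... later: have FW write the stats into the driver buffer. */
cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
cmd.ips_query_residency_info.info_data.panel_inst = 0;
cmd.ips_query_residency_info.info_data.ips_mode = IPS_RESIDENCY__IPS2;
cmd.ips_query_residency_info.info_data.size = sizeof(struct dmub_ips_residency_info);
cmd.ips_query_residency_info.info_data.dest.quad_part = dest_gpu_addr;
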
@@ -5926,13 +6265,25 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
+
struct dmub_rb_cmd_fams2 fams2_config;
+ struct dmub_rb_cmd_ib ib_fams2_config;
+
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
struct dmub_rb_cmd_fused_io fused_io;
+
+ /**
+ * Definition of a DMUB_CMD__LSDMA command.
+ */
+ struct dmub_rb_cmd_lsdma lsdma;
+
+ struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
+
+ struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index a308bd604677..3f38db752b84 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -416,7 +416,7 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -466,6 +466,9 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 72a0f078cd1a..2228d62adc7e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
- if (in_reset == 0) {
+ if (in_reset == 0 && is_enabled != 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- */
-
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
@@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
-
if (is_enabled) {
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ udelay(1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
@@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub)
LONO_SOCCLK_GATE_DISABLE, 1,
LONO_DMCUBCLK_GATE_DISABLE, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- udelay(1);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}
@@ -464,7 +454,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -515,6 +505,9 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 2575dbc448f7..b31adbd0d685 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -413,7 +413,7 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub)
void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
@@ -464,6 +464,9 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index acca7943a8c8..b17a19400c06 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -65,6 +65,12 @@
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
+/* Default indirect buffer size. */
+#define DMUB_IB_MEM_SIZE (1280)
+
+/* Default LSDMA ring buffer size. */
+#define DMUB_LSDMA_RB_SIZE (64 * 1024)
+
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
@@ -559,7 +565,9 @@ enum dmub_status
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
+ window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@@ -645,6 +653,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
@@ -655,7 +664,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_INVALID;
if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+ !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
ASSERT(0);
return DMUB_STATUS_INVALID;
}
@@ -741,6 +750,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->ib_mem_gart = *ib_mem_gart;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 250f09922d2f..71efd2770c99 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
- if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) {
+ if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) {
/* When the target refresh rate is the minimum panel refresh rate,
* round down the vtotal value to avoid stretching vblank over
* panel's vtotal boundary.
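
The old condition compared a value converted to Hz against a threshold still in micro-Hz, so it held for essentially every refresh rate and the round-down applied everywhere. A standalone units check, assuming MICRO_HZ_TO_HZ divides by 1,000,000:

#include <stdio.h>

#define MICRO_HZ_TO_HZ(x) ((x) / 1000000)

int main(void)
{
	unsigned int refresh_in_uhz = 120000000;     /* 120 Hz request */
	unsigned int min_refresh_in_uhz = 48000000;  /* 48 Hz panel floor */

	/* old: 120 <= 48000000 -> 1, wrongly rounds down at 120 Hz too */
	printf("old: %d\n", MICRO_HZ_TO_HZ(refresh_in_uhz) <= min_refresh_in_uhz);

	/* new: 120000000 <= 48000000 -> 0, rounds down only at the floor */
	printf("new: %d\n", refresh_in_uhz <= min_refresh_in_uhz);
	return 0;
}
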
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 758a8aa31fbe..391209a3bf29 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -79,4 +79,6 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
void reset_replay_dsync_error_count(struct dc_link *link);
+void change_replay_to_psr(struct dc_link *link);
+void change_psr_to_replay(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 11374a2cbab8..bfb446736ca8 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -396,6 +396,7 @@ enum amd_dpm_forced_level;
* (such as allocating any required memory)
* @suspend: handles IP specific hw/sw changes for suspend
* @resume: handles IP specific hw/sw changes for resume
+ * @complete: handles IP specific changes after resume
* @is_idle: returns current IP block idle status
* @wait_for_idle: poll for idle
* @check_soft_reset: check soft reset the IP block
@@ -427,6 +428,7 @@ struct amd_ip_funcs {
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
int (*resume)(struct amdgpu_ip_block *ip_block);
+ void (*complete)(struct amdgpu_ip_block *ip_block);
bool (*is_idle)(struct amdgpu_ip_block *ip_block);
int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index f4d914dc731f..e2b1ea7467b0 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -108,6 +108,8 @@ enum pp_clock_type {
PP_VCLK1,
PP_DCLK,
PP_DCLK1,
+ PP_ISPICLK,
+ PP_ISPXCLK,
OD_SCLK,
OD_MCLK,
OD_VDDC_CURVE,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5c1cbdc122d2..71d986dd7a6e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
case AMD_IP_BLOCK_TYPE_VPE:
+ case AMD_IP_BLOCK_TYPE_ISP:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate, 0));
@@ -852,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
uint32_t max)
{
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret = 0;
-
- if (type != PP_SCLK)
- return -EINVAL;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- mutex_lock(&adev->pm.mutex);
- ret = smu_set_soft_freq_range(smu,
- SMU_SCLK,
+ guard(mutex)(&adev->pm.mutex);
+
+ return smu_set_soft_freq_range(smu,
+ type,
min,
max);
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
}
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
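
The conversion above relies on guard(mutex) from linux/cleanup.h, which releases the lock on every exit from the scope, so the function can return the smu call's result directly with no unlock label. A minimal sketch of the pattern in kernel context:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_locked_op(int val)
{
	guard(mutex)(&demo_lock);	/* released automatically on any return */

	if (val < 0)
		return -EINVAL;		/* no explicit mutex_unlock() needed */

	return val * 2;
}
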
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index edd9895b46c0..4b64851fdb42 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
if (ret)
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
while (isspace(*tmp_str))
tmp_str++;
}
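
Once strsep() consumes the last token it sets the cursor to NULL, so the unguarded isspace(*tmp_str) dereferenced a NULL pointer whenever the input ended on a final token. A standalone sketch of the loop shape with the guard in place:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "1800 6 90";
	char *tmp_str = buf;
	char *sub_str;

	while (tmp_str && (sub_str = strsep(&tmp_str, " ")) != NULL) {
		printf("token: %s\n", sub_str);
		if (!tmp_str)
			break;		/* last token: strsep() set the cursor to NULL */
		while (isspace((unsigned char)*tmp_str))
			tmp_str++;
	}
	return 0;
}
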
@@ -1890,7 +1892,7 @@ out:
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -1901,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{
uint32_t ss_power;
- if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
(void *)&ss_power))
@@ -3645,6 +3647,9 @@ static int parse_input_od_command_lines(const char *buf,
return -EINVAL;
parameter_size++;
+ if (!tmp_str)
+ break;
+
while (isspace(*tmp_str))
tmp_str++;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 34e71727b27d..307ebf7e3226 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, enable);
if (ret)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
}
}
@@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = kv_process_firmware_header(adev);
if (ret) {
- DRM_ERROR("kv_process_firmware_header failed\n");
+ drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n");
return ret;
}
kv_init_fps_limits(adev);
kv_init_graphics_levels(adev);
ret = kv_program_bootup_state(adev);
if (ret) {
- DRM_ERROR("kv_program_bootup_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n");
return ret;
}
kv_calculate_dfs_bypass_settings(adev);
ret = kv_upload_dpm_settings(adev);
if (ret) {
- DRM_ERROR("kv_upload_dpm_settings failed\n");
+ drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n");
return ret;
}
ret = kv_populate_uvd_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_uvd_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n");
return ret;
}
ret = kv_populate_vce_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_vce_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n");
return ret;
}
ret = kv_populate_samu_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_samu_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n");
return ret;
}
ret = kv_populate_acp_table(adev);
if (ret) {
- DRM_ERROR("kv_populate_acp_table failed\n");
+ drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n");
return ret;
}
kv_program_vc(adev);
@@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
if (pi->enable_auto_thermal_throttling) {
ret = kv_enable_auto_thermal_throttling(adev);
if (ret) {
- DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n");
return ret;
}
}
ret = kv_enable_dpm_voltage_scaling(adev);
if (ret) {
- DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n");
return ret;
}
ret = kv_set_dpm_interval(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_interval failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n");
return ret;
}
ret = kv_set_dpm_boot_state(adev);
if (ret) {
- DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n");
return ret;
}
ret = kv_enable_ulv(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_ulv failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n");
return ret;
}
kv_start_dpm(adev);
ret = kv_enable_didt(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_didt failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_didt failed\n");
return ret;
}
ret = kv_enable_smc_cac(adev, true);
if (ret) {
- DRM_ERROR("kv_enable_smc_cac failed\n");
+ drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n");
return ret;
}
@@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
ret = amdgpu_kv_smc_bapm_enable(adev, false);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
@@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
- DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n");
return ret;
}
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
@@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
err = amdgpu_kv_smc_bapm_enable(adev, false);
if (err)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
@@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle)
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
}
@@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle)
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_sclk_t(adev);
@@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
+ drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
@@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle)
kv_set_enabled_levels(adev);
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
+ drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
return ret;
}
kv_update_acp_boot_level(adev);
@@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
if (high_temp > max_temp)
high_temp = max_temp;
if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp);
return -EINVAL;
}
@@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
data_offset);
if (crev != 8) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
@@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
else
pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n");
}
if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
@@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps)
struct kv_ps *ps = kv_get_ps(rps);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n",
+ rps->vclk, rps->dclk);
for (i = 0; i < ps->num_levels; i++) {
struct kv_pl *pl = &ps->levels[i];
- printk("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+ drm_dbg(adev_to_drm(adev),
+ "power level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static void kv_dpm_fini(struct amdgpu_device *adev)
@@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- DRM_INFO("amdgpu: dpm initialized\n");
+ drm_info(adev_to_drm(adev), "dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
+ drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index c7518b13e787..ea3ace882a10 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -47,7 +47,7 @@
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
{
const char *s;
@@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2)
s = "performance";
break;
}
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
+ drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s);
if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
(class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
+ else
+ drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
+ (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
+ (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
}
-void amdgpu_dpm_print_cap_info(u32 caps)
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n",
+ (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "",
+ (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "",
+ (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "");
}
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
+ drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
+ rps == adev->pm.dpm.current_ps ? " c" : "",
+ rps == adev->pm.dpm.requested_ps ? " r" : "",
+ rps == adev->pm.dpm.boot_ps ? " b" : "");
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
}
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_CI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
@@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
return -EINVAL;
if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
- printk("switching from power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching from power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
+ drm_dbg(adev_to_drm(adev), "switching to power state\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
index 93bd3973330c..7120eef30509 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -23,10 +23,9 @@
#ifndef __LEGACY_DPM_H__
#define __LEGACY_DPM_H__
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2);
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps);
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps);
int amdgpu_get_platform_caps(struct amdgpu_device *adev);
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 4c0e976004ba..52e732be59e3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7951,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle,
struct rv7xx_pl *pl;
int i;
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+ amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+ drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
- amdgpu_dpm_print_ps_status(adev, rps);
+ amdgpu_dpm_dbg_print_ps_status(adev, rps);
}
static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 79a566f3564a..c305ea4ec17d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
}
cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
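
The old call passed mask where the helper expects value, so the poll tested the wrong operand pair. The prototype that makes the swap visible (signature as assumed from the helper's definition earlier in this file):

int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			 uint32_t value, uint32_t mask);
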
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d79a1d94661a..756afe78a6e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -76,6 +76,7 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,12 +135,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
}
int smu_set_soft_freq_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
+ enum pp_clock_type type,
uint32_t min,
uint32_t max)
{
+ enum smu_clk_type clk_type;
int ret = 0;
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
@@ -307,6 +313,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret;
+
+ if (!smu->ppt_funcs->dpm_set_isp_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->isp_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->isp_gated, !enable);
+
+ return ret;
+}
+
static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
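
In smu_dpm_set_isp_enable() above, isp_gated is 1 while the ISP is gated (powered down), so the XOR guard makes the call idempotent: work happens only when the request actually flips the state. A small truth-table helper, separate from the patch:

/*
 *   gated  enable  gated ^ enable  action
 *     1      1           0         ungate (proceed)
 *     0      0           0         gate   (proceed)
 *     1      0           1         already gated   -> return 0
 *     0      1           1         already ungated -> return 0
 */
static bool isp_request_is_noop(int gated, bool enable)
{
	return gated ^ enable;	/* true when the state already matches the request */
}
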
@@ -408,6 +434,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_ISP:
+ ret = smu_dpm_set_isp_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
@@ -1004,6 +1036,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
+static void smu_update_gpu_addresses(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
+
+ if (pm_status_table->bo)
+ pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
+ if (driver_table->bo)
+ driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
+ if (dummy_read_1_table->bo)
+ dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
+}
+
/**
* smu_alloc_memory_pool - allocate memory pool in the system memory
*
@@ -1285,6 +1332,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu_init_power_profile(smu);
@@ -1672,37 +1720,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
}
- ret = smu_system_features_control(smu, true);
- if (ret) {
- dev_err(adev->dev, "Failed to enable requested dpm features!\n");
- return ret;
- }
-
- smu_init_xgmi_plpd_mode(smu);
-
- ret = smu_feature_get_enabled_mask(smu, &features_supported);
- if (ret) {
- dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
- return ret;
- }
- bitmap_copy(feature->supported,
- (unsigned long *)&features_supported,
- feature->feature_num);
-
- if (!smu_is_dpm_running(smu))
- dev_info(adev->dev, "dpm has been disabled\n");
-
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
pcie_gen = 4;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -1738,6 +1755,37 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_system_features_control(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable requested dpm features!\n");
+ return ret;
+ }
+
+ smu_init_xgmi_plpd_mode(smu);
+
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
+ if (!smu_is_dpm_running(smu))
+ dev_info(adev->dev, "dpm has been disabled\n");
+
+ /*
+ * Set initial values (read from vbios) in the dpm tables context, such as
+ * gfxclk, memclk and dcefclk, and enable the DPM feature for each
+ * type of clock.
+ */
+ ret = smu_set_default_dpm_table(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+ return ret;
+ }
+
ret = smu_get_thermal_temperature_range(smu);
if (ret) {
dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
@@ -1780,6 +1828,9 @@ static int smu_start_smc_engine(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ smu_update_gpu_addresses(smu);
+
smu->smc_fw_state = SMU_FW_INIT;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -2935,6 +2986,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_DCLK; break;
case PP_DCLK1:
clk_type = SMU_DCLK1; break;
+ case PP_ISPICLK:
+ clk_type = SMU_ISPICLK;
+ break;
+ case PP_ISPXCLK:
+ clk_type = SMU_ISPXCLK;
+ break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 9aacc7bc1c69..b52e194397e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -402,6 +402,7 @@ struct smu_power_gate {
atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
+ atomic_t isp_gated;
atomic_t umsch_mm_gated;
};
@@ -1436,6 +1437,12 @@ struct pptable_funcs {
int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
/**
+ * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
+ * management.
+ */
+ int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
+
+ /**
* @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
* management.
*/
@@ -1635,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
uint32_t min, uint32_t max);
int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 1bc30db22f9c..cd44f4254134 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -106,6 +106,7 @@ typedef struct {
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_MEM_PSTATE_LEVELS 4
+#define ISP_ALL_TILES_MASK 0x7FF
typedef struct {
uint32_t UClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index d7505cfc433a..0a2ca544f4e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -86,8 +86,10 @@ typedef enum {
/*36*/ FEATURE_PIT = 36,
/*37*/ FEATURE_DVO = 37,
/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
-/*39*/ NUM_FEATURES = 39
+/*39*/ FEATURE_GLOBAL_DPM = 39,
+/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
+/*41*/ NUM_FEATURES = 41
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -133,7 +135,7 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x12
+#define SMU_METRICS_TABLE_VERSION 0x13
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -275,6 +277,16 @@ typedef struct {
//PSNs
uint64_t PublicSerialNumber_AID[4];
uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index eefdaa0b5df6..d7a9e41820fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -305,6 +305,8 @@ enum smu_clk_type {
SMU_MCLK,
SMU_PCIE,
SMU_LCLK,
+ SMU_ISPICLK,
+ SMU_ISPXCLK,
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7fad5dfb39c4..aac202d0c30e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
/* lclk dpm table setup */
for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
- (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
- pptable->PcieLaneCount[i] : pcie_width_cap);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
-
- if (ret)
- return ret;
-
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (pptable->PcieLaneCount[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+ pptable->PcieLaneCount[i] > pcie_width_cap) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
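+ /* arg layout: link level in [23:16], gen speed in [15:8], lane count in [7:0] */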
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_gen_cap << 8;
+ smu_pcie_arg |= pcie_width_cap;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static inline void navi10_dump_od_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 115e3fa456bc..d57591509aed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint8_t min_gen_speed, max_gen_speed;
uint8_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
@@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16 |
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) ||
+ table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) {
+ smu_pcie_arg = (i << 16 |
pcie_table->pcie_gen[i] << 8 |
pcie_table->pcie_lane[i]);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a55ea76d7399..2c9869feba61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_CCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
- smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
- smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
break;
case SMU_SOCCLK:
/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9481f897432d..e97b0cf19197 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMinGfxclkFrequency,
- 0, &min);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMaxGfxclkFrequency,
- 0, &max);
- if (ret)
- return ret;
- size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
break;
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
- max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
- size += sysfs_emit_at(buf, size, "OD_SCLK\n");
- size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
- size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
- }
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+ size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+ size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
break;
case SMU_GFXCLK:
case SMU_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 6de653d2ed62..c63d2e28954d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk;
+
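+ /* for SR-IOV VFs, report the limits cached in the DPM tables */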
+ if (amdgpu_sriov_vf(smu->adev)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+
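+ /* treat a zero limit as missing data */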
+ if (min) {
+ if (!min_clk)
+ return -ENODATA;
+ *min = min_clk;
+ }
+ if (max) {
+ if (!max_clk)
+ return -ENODATA;
+ *max = max_clk;
+ }
+
+ } else {
+ return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ }
+
+ return 0;
+}
+
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -2081,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
.get_bamaco_support = aldebaran_get_bamaco_support,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 1c7235935d14..1a1f2a6b2e52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2386,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
&dpm_context->dpm_tables.pcie_table;
int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
if (!num_of_levels)
return 0;
@@ -2402,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
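+ /* only send an override for levels that exceed the platform caps */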
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
}
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
+ return ret;
}
int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 5a9711e8cf68..e084ed99ec0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ uint32_t smu_pcie_arg;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+
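+ /* rebuild the lclk/pcie link table from the SKU table */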
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index e0d356f93ab0..02a455a31c25 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,8 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
+static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint16_t max_speed;
+ uint8_t max_width;
+ int ret;
+
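+ /* metrics v0x13 and newer report the XGMI caps directly in the static table */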
+ if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
+ max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+ max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ ret = 0;
+ } else {
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+ if (!ret) {
+ max_width = (uint8_t)metrics->XgmiWidth;
+ max_speed = (uint16_t)metrics->XgmiBitrate;
+ }
+ }
+ if (!ret)
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
+}
+
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
@@ -237,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
if (ret)
return ret;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %u\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ }
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -263,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
int xcc_id;
/* For clocks with multiple instances, only report the first one */
@@ -319,7 +356,7 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- return ret;
+ return 0;
}
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index f00ef7f3f355..9cc294f4708b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -345,6 +345,11 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00562500)
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+
+ if (fw_ver >= 0x04560100) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -685,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
-static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
- void *metrics_table, bool bypass_cache)
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
@@ -800,6 +805,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
+ uint16_t max_speed;
+ uint8_t max_width;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
@@ -835,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
+ max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
+ max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
@@ -871,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- uint32_t clock_limit = 0, param;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ /* Use the dpm tables if the data has already been fetched */
+ if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
- if (pptable->Init)
- clock_limit = pptable->UclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
- if (pptable->Init)
- clock_limit = pptable->MinGfxclkFrequency;
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
- if (pptable->Init)
- clock_limit = pptable->SocclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
- if (pptable->Init)
- clock_limit = pptable->FclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
- if (pptable->Init)
- clock_limit = pptable->VclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
- if (pptable->Init)
- clock_limit = pptable->DclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
- break;
+ return -EINVAL;
}
- if (min)
- *min = clock_limit;
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+ if (min)
+ *min = min_clk;
if (max)
- *max = clock_limit;
+ *max = max_clk;
- return 0;
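+ /* fall back to querying PMFW when either limit is still unset */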
+ if (min_clk && max_clk)
+ return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
@@ -1377,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
return ret;
}
- min_clk = pstate_table->gfxclk_pstate.curr.min;
- max_clk = pstate_table->gfxclk_pstate.curr.max;
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ min_clk = single_dpm_table->min;
+ max_clk = single_dpm_table->max;
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
@@ -2682,7 +2693,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
if (ret) {
kfree(metrics_v0);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index d38d6d76b1e7..67b30674fd31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -74,6 +74,8 @@ enum smu_v13_0_6_caps {
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache);
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
int smu_v13_0_12_get_max_metrics_size(void);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c8f4f6fb4083..c96fa5e49ed6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ int link_level;
+ uint32_t smu_pcie_arg;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 84f9b007b59f..fe00c84b1cc6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ u32 min,
+ u32 max,
+ bool __always_unused automatic)
{
- enum smu_message_type msg_set_min, msg_set_max;
- int ret = 0;
+ enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;
+ enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT;
+ int ret = -EINVAL;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
return -EINVAL;
@@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
+ case SMU_ISPICLK:
+ msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq;
+ break;
+ case SMU_ISPXCLK:
+ msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq;
+ break;
default:
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
- if (ret)
- return ret;
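+ /* some clocks (e.g. ISP) define only a hard-min message; send only what is set */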
+ if (min && msg_set_min != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+
+ if (max && msg_set_max != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
- return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
- max, NULL);
+ return ret;
}
static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
@@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
if (ret)
break;
- ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
break;
default:
ret = -EINVAL;
@@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ false);
if (ret)
return ret;
}
@@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK1,
vclk1_min,
- vclk1_max);
+ vclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
@@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK1,
dclk1_min,
- dclk1_max);
+ dclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_v14_0_0_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
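+ /* a single message gates all ISP tiles at once */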
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile,
+ ISP_ALL_TILES_MASK, NULL);
+}
+
static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
@@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
.set_mall_enable = smu_v14_0_common_set_mall_enable,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 82c2db972491..3aea32baea3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_14_0_dpm_table *dpm_table;
- struct smu_14_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
-
- if (link_level == 0)
- link_level++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_14_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
- int num_of_levels = pcie_table->num_of_link_levels;
+ int num_of_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+ num_of_levels = pcie_table->num_of_link_levels;
if (!num_of_levels)
return 0;
@@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
- }
-
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
- if (ret)
- return ret;
+ if (ret)
+ break;
+ }
+ }
}
- return 0;
+ return ret;
}
static const struct smu_temperature_range smu14_thermal_policy[] = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 7eaf58fd7f9a..59f9abd0f7b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB
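+/* register reads back as all ones, e.g. when the device has dropped off the bus */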
+#define SMU_RESP_UNEXP (~0U)
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
* @smu: a pointer to SMU context
@@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
dev_err_ratelimited(adev->dev,
"SMU: I'm debugging!");
break;
+ case SMU_RESP_UNEXP:
+ if (amdgpu_device_bus_status_check(smu->adev)) {
+ /* print error immediately if device is off the bus */
+ dev_err(adev->dev,
+ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+ reg_c2pmsg_90, msg_index, param, message);
+ break;
+ }
+ fallthrough;
default:
dev_err_ratelimited(adev->dev,
"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 7473672abd2a..a608cdbdada4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,28 +40,29 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
-#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
- do { \
- typecheck(struct gpu_metrics_v##frev##_##crev, \
- typeof(*(ptr))); \
- struct metrics_table_header *header = \
- (struct metrics_table_header *)(ptr); \
- memset(header, 0xFF, sizeof(*(ptr))); \
- header->format_revision = frev; \
- header->content_revision = crev; \
- header->structure_size = sizeof(*(ptr)); \
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
+ struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*tmp); \
} while (0)
-#define smu_cmn_init_partition_metrics(ptr, frev, crev) \
- do { \
- typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \
- typeof(*(ptr))); \
- struct metrics_table_header *header = \
- (struct metrics_table_header *)(ptr); \
- memset(header, 0xFF, sizeof(*(ptr))); \
- header->format_revision = frev; \
- header->content_revision = crev; \
- header->structure_size = sizeof(*(ptr)); \
+#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
} while (0)
extern const int link_speed[];
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index df5da5a44755..901f938aefe0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -157,6 +157,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct komeda_dev *mdev = dev->dev_private;
@@ -177,7 +178,7 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EINVAL);
}
- drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &kfb->base, info, mode_cmd);
if (kfb->base.modifier)
ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index c61ca98a3a63..02b2b8ae482a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -37,6 +37,7 @@ struct komeda_fb {
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 3cfefadc7c9d..806da0aaedf7 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -11,8 +11,8 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/pixel_format.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
@@ -73,7 +73,17 @@ static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
.disable_vblank = hdlcd_crtc_disable_vblank,
};
-static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
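+/* map DRM fourcc codes to the generic pixel-format descriptions */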
+static const struct {
+ u32 fourcc;
+ struct pixel_format pixel;
+} supported_formats[] = {
+ { DRM_FORMAT_RGB565, PIXEL_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB1555, PIXEL_FORMAT_XRGB1555 },
+ { DRM_FORMAT_RGB888, PIXEL_FORMAT_RGB888 },
+ { DRM_FORMAT_XRGB8888, PIXEL_FORMAT_XRGB8888 },
+ { DRM_FORMAT_XBGR8888, PIXEL_FORMAT_XBGR8888 },
+ { DRM_FORMAT_XRGB2101010, PIXEL_FORMAT_XRGB2101010 },
+};
/*
* Setup the HDLCD registers for decoding the pixels out of the framebuffer
@@ -83,15 +93,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
- uint32_t pixel_format;
- struct simplefb_format *format = NULL;
+ const struct pixel_format *format = NULL;
int i;
- pixel_format = fb->format->format;
-
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ if (supported_formats[i].fourcc == fb->format->format)
+ format = &supported_formats[i].pixel;
}
if (WARN_ON(!format))
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index e083021e9e99..bc5f5e9798c3 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -306,10 +306,10 @@ malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int n_superblocks = 0;
- const struct drm_format_info *info;
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
@@ -325,8 +325,6 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
return false;
}
- info = drm_get_format_info(dev, mode_cmd);
-
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
@@ -366,24 +364,26 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
- return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);
+ return malidp_verify_afbc_framebuffer_size(dev, file, info, mode_cmd);
return false;
}
static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->modifier[0]) {
- if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
+ if (!malidp_verify_afbc_framebuffer(dev, file, info, mode_cmd))
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file, mode_cmd);
+ return drm_gem_fb_create(dev, file, info, mode_cmd);
}
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index cf2e88218dc0..aa4289127086 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -18,7 +18,9 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = {
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode,
+ struct armada_gem_object *obj)
{
struct armada_framebuffer *dfb;
uint8_t format, config;
@@ -64,7 +66,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->mod = config;
dfb->fb.obj[0] = &obj->obj;
- drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
+ drm_helper_mode_fill_fb_struct(dev, &dfb->fb, info, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
@@ -84,9 +86,9 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
}
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
@@ -122,7 +124,7 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err_unref;
}
- dfb = armada_framebuffer_create(dev, mode, obj);
+ dfb = armada_framebuffer_create(dev, info, mode, obj);
if (IS_ERR(dfb)) {
ret = PTR_ERR(dfb);
goto err;
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index c5bc53d7e0c4..f2b990f055a2 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -17,7 +17,9 @@ struct armada_framebuffer {
#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
- struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
+ struct drm_file *dfile, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 6ee7ce04ee71..cb53cc91bafb 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -78,7 +78,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
return -ENOMEM;
}
- dfb = armada_framebuffer_create(dev, &mode, obj);
+ dfb = armada_framebuffer_create(dev,
+ drm_get_format_info(dev, mode.pixel_format,
+ mode.modifier[0]),
+ &mode, obj);
/*
* A reference is now held by the framebuffer object if
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 8d09ba5d5889..2547613155da 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,11 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := \
+ ast_2000.o \
+ ast_2100.o \
+ ast_2300.o \
+ ast_2500.o \
+ ast_2600.o \
ast_cursor.o \
ast_ddc.o \
ast_dp501.o \
diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c
new file mode 100644
index 000000000000..41c2aa1e425a
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2000.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2000_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xFFFFFFFF },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000089),
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static void ast_post_chip_2000(struct ast_device *ast)
+{
+ u8 j;
+ u32 temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ dram_reg_info = ast2000_dram_table_data;
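+
+		/*
+		 * Open the P2A bridge window onto the SDRAM controller at
+		 * 0x1e6e0000; subsequent 0x1xxxx MMIO accesses alias AHB
+		 * 0x1e6exxxx.
+		 */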
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+
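+		/*
+		 * Walk the register/value table: UDELAY entries are applied 15
+		 * times, everything else is written into the SDRAM controller.
+		 */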
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2000_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2000(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
new file mode 100644
index 000000000000..477ee15eff5d
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000489),
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ AST_DRAMSTRUCT_UDELAY(67u),
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ AST_DRAMSTRUCT_INVALID,
+};
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
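+/*
+ * Run the controller's built-in memory test: writing 0x1e6e0070 starts the
+ * engine (pattern generator selected by bits [5:3]); per the polling flow
+ * below, bit 6 signals completion and bit 7 latches a failure. Returns the
+ * fail flag, or 0xffffffff on timeout.
+ */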
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static int cbrtest_ast2150(struct ast_device *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
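+/*
+ * CBR DLL calibration: sweep the DLL delay from 0 to 99, record the passing
+ * window, and program the final delay at roughly 7/16 into that window.
+ */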
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = 0xff;
+ dll_min[1] = 0xff;
+ dll_min[2] = 0xff;
+ dll_min[3] = 0xff;
+ dll_max[0] = 0x00;
+ dll_max[1] = 0x00;
+ dll_max[2] = 0x00;
+ dll_max[3] = 0x00;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150) {
+ goto cbr_start;
+ }
+ }
+ if (dll_max[0] == 0 || (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_post_chip_2100(struct ast_device *ast)
+{
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
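+		/* open the P2A window, then unlock the SCU and MMC register keys */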
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+
+ while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) {
+ if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) {
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
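+				/*
+				 * Override the table's DRAM-type value with the
+				 * detected chip type and merge in the board
+				 * strap bits from SCU70[3:2].
+				 */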
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else {
+ ast_write32(ast, 0x10000 + dram_reg_info->index,
+ dram_reg_info->data);
+ }
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+		if (data == 0x5061) { /* 266 MHz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+int ast_2100_post(struct ast_device *ast)
+{
+ ast_2000_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2100(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c
new file mode 100644
index 000000000000..dc2a32244689
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2300.c
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+void ast_2300_set_def_ext_reg(struct ast_device *ast)
+{
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1f, 0xff };
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x9f; i++)
+ ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
+
+ ext_reg_info = extreginfo;
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+	/* ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE0 ((1 << 10) - 1)
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
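+/*
+ * Drive the MMC test engine with an arbitrary control word; bit 12 of
+ * 0x1e6e0070 signals completion and the per-lane fail bits are read from
+ * 0x1e6e0078 (low and high words merged). Returns 0xffffffff on timeout.
+ */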
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = ast_mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x41);
+}
+
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0xc5);
+}
+
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test2(ast, datagen, 0x05);
+}
+
+static int cbr_test(struct ast_device *ast)
+{
+ u32 data;
+ int i;
+
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_device *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_device *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test2(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static bool cbr_test3(struct ast_device *ast)
+{
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single(ast, 0))
+ return false;
+ return true;
+}
+
+static bool cbr_scan3(struct ast_device *ast)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < 2; loop++) {
+ if (cbr_test3(ast))
+ break;
+ }
+ if (loop == 2)
+ return false;
+ }
+ return true;
+}
+
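+/*
+ * Fine-tune the DQ input delays: sweep the DLL across all 16 DQ lanes,
+ * average the start of the passing windows into gold_sadj, then derive a
+ * 3-bit adjustment per lane and program it into 0x1e6e0080/0x1e6e0084.
+ */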
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+ bool status = false;
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli)
+ dllmin[cnt] = dlli;
+ if (dllmax[cnt] < dlli)
+ dllmax[cnt] = dlli;
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (retry++ > 10)
+ goto FINETUNE_DONE;
+ if (passcnt != 16)
+ goto FINETUNE_START;
+ status = true;
+FINETUNE_DONE:
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) &&
+ ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3)
+ dlli = 3;
+ else
+ dlli = (dlli - 1) & 0x7;
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4)
+ dlli = 4;
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0084, data);
+ return status;
+} /* finetuneDQI_L */
+
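+/*
+ * Train the DQS input delay: for each (dqidly, dqsip) candidate, sweep the
+ * DLL and tag the passing settings, then pick the candidate with the widest
+ * margin and merge it into MCR18 (0x1e6e0018).
+ */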
+static void finetuneDQSI(struct ast_device *ast)
+{
+ u32 dlli, dqsip, dqidly;
+ u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+ u32 g_dqidly, g_dqsip, g_margin, g_side;
+ u16 pass[32][2][2];
+ char tag[2][76];
+
+ /* Disable DQI CBR */
+ reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+ reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+ reg_mcr18 &= 0x0000ffff;
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+
+ for (dlli = 0; dlli < 76; dlli++) {
+ tag[0][dlli] = 0x0;
+ tag[1][dlli] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ pass[dqidly][0][0] = 0xff;
+ pass[dqidly][0][1] = 0x0;
+ pass[dqidly][1][0] = 0xff;
+ pass[dqidly][1][1] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+ ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068,
+ 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0070, 0);
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+ if (cbr_scan3(ast)) {
+ if (dlli == 0)
+ break;
+ passcnt[dqsip]++;
+ tag[dqsip][dlli] = 'P';
+ if (dlli < pass[dqidly][dqsip][0])
+ pass[dqidly][dqsip][0] = (u16)dlli;
+ if (dlli > pass[dqidly][dqsip][1])
+ pass[dqidly][dqsip][1] = (u16)dlli;
+ } else if (passcnt[dqsip] >= 5) {
+ break;
+ } else {
+ pass[dqidly][dqsip][0] = 0xff;
+ pass[dqidly][dqsip][1] = 0x0;
+ }
+ }
+ }
+ if (passcnt[0] == 0 && passcnt[1] == 0)
+ dqidly++;
+ }
+ /* Search margin */
+ g_dqidly = 0;
+ g_dqsip = 0;
+ g_margin = 0;
+ g_side = 0;
+
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+ continue;
+ diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+ if ((diff + 2) < g_margin)
+ continue;
+ passcnt[0] = 0;
+ passcnt[1] = 0;
+ for (dlli = pass[dqidly][dqsip][0];
+ dlli > 0 && tag[dqsip][dlli] != 0;
+ dlli--, passcnt[0]++) {
+ }
+ for (dlli = pass[dqidly][dqsip][1];
+ dlli < 76 && tag[dqsip][dlli] != 0;
+ dlli++, passcnt[1]++) {
+ }
+ if (passcnt[0] > passcnt[1])
+ passcnt[0] = passcnt[1];
+ passcnt[1] = 0;
+ if (passcnt[0] > g_side)
+ passcnt[1] = passcnt[0] - g_side;
+ if (diff > (g_margin + 1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ } else if (passcnt[1] > 1 && g_side < 8) {
+ if (diff > g_margin)
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ }
+ }
+ }
+ reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+}
+
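+/*
+ * Top-level read calibration: run the DQSI and DQI fine-tuning, then sweep
+ * the DLL over both channels and center the final delay in the common
+ * passing window, retrying up to 10 times.
+ */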
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+ bool status = false;
+
+ finetuneDQSI(ast);
+ if (finetuneDQI_L(ast, param) == false)
+ return status;
+
+CBR_START2:
+ dllmin[0] = 0xff;
+ dllmin[1] = 0xff;
+ dllmax[0] = 0x0;
+ dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli)
+ dllmin[0] = dlli;
+ if (dllmax[0] < dlli)
+ dllmax[0] = dlli;
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli)
+ dllmin[1] = dlli;
+ if (dllmax[1] < dlli)
+ dllmax[1] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (retry++ > 10)
+ goto CBR_DONE2;
+ if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD)
+ goto CBR_START2;
+ status = true;
+CBR_DONE2:
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+ return status;
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
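+	/*
+	 * The 2-bit strap from SCU70[26:25] is folded into the AC timing
+	 * (trap_AC2) and mode-register (trap_MRS) values selected below.
+	 */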
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA00761C | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA007636 | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
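+/*
+ * Bring up the DDR3 controller: program the per-speed parameters, wait for
+ * MCLK2X to lock, issue the JEDEC mode-register sequence, then calibrate
+ * DQSI, retrying the whole init up to 10 times on calibration failure.
+ */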
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr3_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+	/* Wait for MCLK2X to lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
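+	/*
+	 * While MCLK2X has not settled, step the master DLL adjust (MADJ,
+	 * 0x1e6e0064) and the matching slave adjust (SADJ, 0x1e6e0068) until
+	 * it locks or madj_max is reached.
+	 */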
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ data = 0;
+ if (param->wodt)
+ data = 0x300;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr3_init_start;
+
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xAA009012 | trap_AC2;
+ break;
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA009023 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA00903B | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case SZ_8M:
+ param->dram_config |= 0x00;
+ break;
+ case SZ_16M:
+ param->dram_config |= 0x04;
+ break;
+ case SZ_32M:
+ param->dram_config |= 0x08;
+ break;
+ case SZ_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
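+/*
+ * DDR2 bring-up: same flow as ddr3_init(), but with the DDR2 mode-register
+ * sequence and ODT settings.
+ */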
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr2_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
+ ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+	/* Wait for MCLK2X to lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max)
+ break;
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000)
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ else
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt)
+ data = 0x500;
+ if (param->rodt)
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr2_init_start;
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void ast_post_chip_2300(struct ast_device *ast)
+{
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) {/* vga only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
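+		/*
+		 * Detect the DRAM type, chip size and VRAM size from the SCU70
+		 * hardware straps, then run the matching DDR2/DDR3 bring-up.
+		 */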
+ param.dram_freq = 396;
+ param.dram_type = AST_DDR3;
+ temp = ast_mindwm(ast, 0x1e6e2070);
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ switch (temp & 0x18000000) {
+ case 0:
+ param.dram_chipid = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 0x08000000:
+ param.dram_chipid = AST_DRAM_1Gx16;
+ break;
+ case 0x10000000:
+ param.dram_chipid = AST_DRAM_2Gx16;
+ break;
+ case 0x18000000:
+ param.dram_chipid = AST_DRAM_4Gx16;
+ break;
+ }
+ switch (temp & 0x0c) {
+ default:
+ case 0x00:
+ param.vram_size = SZ_8M;
+ break;
+ case 0x04:
+ param.vram_size = SZ_16M;
+ break;
+ case 0x08:
+ param.vram_size = SZ_32M;
+ break;
+ case 0x0c:
+ param.vram_size = SZ_64M;
+ break;
+ }
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2300_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2300(ast);
+ ast_init_3rdtx(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c
new file mode 100644
index 000000000000..1e541498ea67
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2500.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+/*
+ * AST2500 DRAM settings modules
+ */
+
+#define REGTBL_NUM 17
+#define REGIDX_010 0
+#define REGIDX_014 1
+#define REGIDX_018 2
+#define REGIDX_020 3
+#define REGIDX_024 4
+#define REGIDX_02C 5
+#define REGIDX_030 6
+#define REGIDX_214 7
+#define REGIDX_2E0 8
+#define REGIDX_2E4 9
+#define REGIDX_2E8 10
+#define REGIDX_2EC 11
+#define REGIDX_2F0 12
+#define REGIDX_2F4 13
+#define REGIDX_2F8 14
+#define REGIDX_RFC 15
+#define REGIDX_PLL 16
+
+static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
+ 0x64604D38, /* 0x010 */
+ 0x29690599, /* 0x014 */
+ 0x00000300, /* 0x018 */
+ 0x00000000, /* 0x020 */
+ 0x00000000, /* 0x024 */
+ 0x02181E70, /* 0x02C */
+ 0x00000040, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x02001300, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001B, /* 0x2E8 */
+ 0x35B8C105, /* 0x2EC */
+ 0x08090408, /* 0x2F0 */
+ 0x9B000800, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x9971452F, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
+ 0x63604E37, /* 0x010 */
+ 0xE97AFA99, /* 0x014 */
+ 0x00019000, /* 0x018 */
+ 0x08000000, /* 0x020 */
+ 0x00000400, /* 0x024 */
+ 0x00000410, /* 0x02C */
+ 0x00000101, /* 0x030 */
+ 0x00000024, /* 0x214 */
+ 0x03002900, /* 0x2E0 */
+ 0x0E0000A0, /* 0x2E4 */
+ 0x000E001C, /* 0x2E8 */
+ 0x35B8C106, /* 0x2EC */
+ 0x08080607, /* 0x2F0 */
+ 0x9B000900, /* 0x2F4 */
+ 0x0E400A00, /* 0x2F8 */
+ 0x99714545, /* tRFC */
+ 0x000071C1 /* PLL */
+};
+
+#define TIMEOUT 5000000
+
+void ast_2500_patch_ahb(void __iomem *regs)
+{
+ u32 data;
+
+ /* Clear bus lock condition */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
+ /*
+ * If "Fast restet" is enabled for ARM-ICE debugger,
+ * then WDT needs to enable, that
+ * WDT04 is WDT#1 Reload reg.
+ * WDT08 is WDT#1 counter restart reg to avoid system deadlock
+ * WDT0C is WDT#1 control reg
+ * [6:5]:= 01:Full chip
+ * [4]:= 1:1MHz clock source
+ * [1]:= 1:WDT will be cleeared and disabled after timeout occurs
+ * [0]:= 1:WDT enable
+ */
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
+ udelay(1000);
+ }
+
+ do {
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
+}
+
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
+{
+ return mmc_test(ast, datagen, 0x85);
+}
+
+static bool cbr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
+static bool ddr_test_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
+ ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
+ if (!mmc_test_burst(ast, 0))
+ return false;
+ if (!mmc_test_burst(ast, 1))
+ return false;
+ if (!mmc_test_burst(ast, 2))
+ return false;
+ if (!mmc_test_burst(ast, 3))
+ return false;
+ if (!mmc_test_single_2500(ast, 0))
+ return false;
+ return true;
+}
+
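+/* Program the speed-independent SDRAM controller and PHY defaults. */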
+static void ddr_init_common_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
+ ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
+ ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
+ ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
+ ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
+}
+
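+/*
+ * Fire DFI init via 0x1e6e0060 and poll until the busy bit clears;
+ * 0x1e6e0300 reports calibration errors, in which case the init is retried.
+ */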
+static void ddr_phy_init_2500(struct ast_device *ast)
+{
+ u32 data, pass, timecnt;
+
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ while (!pass) {
+ for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
+ data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
+ if (!data)
+ break;
+ }
+ if (timecnt != TIMEOUT) {
+ data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
+ if (!data)
+ pass = 1;
+ }
+ if (!pass) {
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ udelay(10); /* delay 10 us */
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
+}
+
+/*
+ * Check DRAM Size
+ * 1Gb : 0x80000000 ~ 0x87FFFFFF
+ * 2Gb : 0x80000000 ~ 0x8FFFFFFF
+ * 4Gb : 0x80000000 ~ 0x9FFFFFFF
+ * 8Gb : 0x80000000 ~ 0xBFFFFFFF
+ */
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
+{
+ u32 reg_04, reg_14;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
+ reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
+
+ ast_moutdwm(ast, 0xA0100000, 0x41424344);
+ ast_moutdwm(ast, 0x90100000, 0x35363738);
+ ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
+ ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
+
+ /* Check 8Gbit */
+ if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
+ reg_04 |= 0x03;
+ reg_14 |= (tRFC >> 24) & 0xFF;
+ /* Check 4Gbit */
+ } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
+ reg_04 |= 0x02;
+ reg_14 |= (tRFC >> 16) & 0xFF;
+ /* Check 2Gbit */
+ } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
+ reg_04 |= 0x01;
+ reg_14 |= (tRFC >> 8) & 0xFF;
+ } else {
+ reg_14 |= tRFC & 0xFF;
+ }
+ ast_moutdwm(ast, 0x1E6E0004, reg_04);
+ ast_moutdwm(ast, 0x1E6E0014, reg_14);
+}
+
+static void enable_cache_2500(struct ast_device *ast)
+{
+ u32 reg_04, data;
+
+ reg_04 = ast_mindwm(ast, 0x1E6E0004);
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
+
+	do {
+		data = ast_mindwm(ast, 0x1E6E0004);
+	} while (!(data & 0x80000));
+ ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
+}
+
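+/*
+ * Reset the MMC register file and program the memory PLL (SCU register
+ * 0x20) for the reference clock selected by the SCU70 strap (25 or 24 MHz).
+ */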
+static void set_mpll_2500(struct ast_device *ast)
+{
+ u32 addr, data, param;
+
+ /* Reset MMC */
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
+ for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
+ ast_moutdwm(ast, addr, 0x0);
+ addr += 4;
+ }
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+ data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
+ if (data) {
+ /* CLKIN = 25MHz */
+ param = 0x930023E0;
+ ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
+ } else {
+ /* CLKIN = 24MHz */
+ param = 0x93002400;
+ }
+ ast_moutdwm(ast, 0x1E6E2020, param);
+ udelay(100);
+}
+
+static void reset_mmc_2500(struct ast_device *ast)
+{
+ ast_moutdwm(ast, 0x1E78505C, 0x00000004);
+ ast_moutdwm(ast, 0x1E785044, 0x00000001);
+ ast_moutdwm(ast, 0x1E785048, 0x00004755);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000013);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E785054, 0x00000077);
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+}
+
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
+
+	/* Wait for DDR PHY init to complete */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
+{
+ u32 data, data2, pass, retrycnt;
+ u32 ddr_vref, phy_vref;
+ u32 min_ddr_vref = 0, min_phy_vref = 0;
+ u32 max_ddr_vref = 0, max_phy_vref = 0;
+
+ ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
+ ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
+ ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
+ ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
+ ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
+ ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
+ ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
+ ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
+
+ /* DDR PHY Setting */
+ ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
+ ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
+ ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
+ ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
+ ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
+ ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
+ ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
+ ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
+ ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
+ ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
+ ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
+ ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
+ ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
+ ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
+
+ /* Controller Setting */
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
+
+ /* Train PHY Vref first */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ max_phy_vref = 0x0;
+ pass = 0;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
+ for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ data = ast_mindwm(ast, 0x1E6E03D0);
+ data2 = data >> 8;
+ data = data & 0xff;
+ if (data > data2)
+ data = data2;
+ if (max_phy_vref < data) {
+ max_phy_vref = data;
+ /* remember the Vref with the widest passing eye so far */
+ min_phy_vref = phy_vref;
+ }
+ } else if (pass > 0) {
+ break;
+ }
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
+
+ /* Train DDR Vref next */
+ pass = 0;
+
+ for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
+ min_ddr_vref = 0xFF;
+ max_ddr_vref = 0x0;
+ pass = 0;
+ for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+ /* Fire DFI Init */
+ ddr_phy_init_2500(ast);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
+ if (cbr_test_2500(ast)) {
+ pass++;
+ if (min_ddr_vref > ddr_vref)
+ min_ddr_vref = ddr_vref;
+ if (max_ddr_vref < ddr_vref)
+ max_ddr_vref = ddr_vref;
+ } else if (pass != 0) {
+ break;
+ }
+ }
+ }
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
+ /* program the midpoint of the passing Vref window */
+ ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
+ ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
+
+ /* Wait DDR PHY init done */
+ ddr_phy_init_2500(ast);
+
+ ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
+ ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
+ ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
+
+ check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
+ enable_cache_2500(ast);
+ ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
+ ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
+}
+
+static bool ast_dram_init_2500(struct ast_device *ast)
+{
+ u32 data;
+ u32 max_tries = 5;
+
+ do {
+ if (max_tries-- == 0)
+ return false;
+ set_mpll_2500(ast);
+ reset_mmc_2500(ast);
+ ddr_init_common_2500(ast);
+
+ data = ast_mindwm(ast, 0x1E6E2070);
+ if (data & 0x01000000)
+ ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
+ else
+ ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
+ } while (!ddr_test_2500(ast));
+
+ ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
+
+ /* Patch code */
+ data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
+ ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
+
+ return true;
+}
+
+static void ast_post_chip_2500(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) { /* VGA only */
+ /* Clear bus lock condition */
+ ast_2500_patch_ahb(ast->regs);
+
+ /* Disable watchdog */
+ ast_moutdwm(ast, 0x1E78502C, 0x00000000);
+ ast_moutdwm(ast, 0x1E78504C, 0x00000000);
+
+ /*
+ * Reset the USB port to work around the "USB unknown device" issue.
+ * SCU90 is Multi-function Pin Control #5:
+ * [29] := 1: enable USB2.0 Host port #1 (the mutually shared USB2.0
+ * Hub port).
+ * SCU94 is Multi-function Pin Control #6:
+ * [14:13] := 1x: USB2.0 Host2 controller.
+ * SCU70 is the Hardware Strap register:
+ * [23] := 1: CLKIN is 25 MHz and USBCK1 = 24/48 MHz (selected by
+ * [18]: 0 = 24 MHz, 1 = 48 MHz).
+ * SCU7C is the write-1-to-clear register for SCU70:
+ * [23] := write 1 to clear SCU70[23] back to 0.
+ */
+ ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
+ ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
+ if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
+ ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
+ mdelay(100);
+ ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
+ }
+ /* Modify eSPI reset pin */
+ temp = ast_mindwm(ast, 0x1E6E2070);
+ if (temp & 0x02000000)
+ ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ if (!ast_dram_init_2500(ast))
+ drm_err(dev, "DRAM init failed!\n");
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
+int ast_2500_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->config_mode == ast_use_p2a) {
+ ast_post_chip_2500(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+
+ return 0;
+}
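
For readers following ddr4_init_2500() above: the Vref training is a plain one-dimensional window search. The driver sweeps the reference voltage, records the lowest and highest values that pass the calibration test, and programs the midpoint. Below is a minimal standalone sketch of that pattern; pass_at_vref() is a hypothetical stand-in for the hardware test (cbr_test_2500() in the driver) and the sweep bounds are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the hardware test: pass inside a fixed window. */
static bool pass_at_vref(unsigned int vref)
{
	return vref >= 0x12 && vref <= 0x2a;
}

/* Sweep [lo, hi), track the passing window, return its midpoint. */
static unsigned int train_vref(unsigned int lo, unsigned int hi)
{
	unsigned int vref, vmin = hi, vmax = 0;

	for (vref = lo; vref < hi; vref++) {
		if (pass_at_vref(vref)) {
			if (vref < vmin)
				vmin = vref;
			if (vref > vmax)
				vmax = vref;
		} else if (vmax) {
			/* Window has closed; stop early like the driver does. */
			break;
		}
	}
	return (vmin + vmax + 1) >> 1;	/* same rounding as ddr4_init_2500() */
}

int main(void)
{
	printf("trained Vref = 0x%x\n", train_vref(0x00, 0x40));
	return 0;
}

The early break once the window has closed mirrors the driver's "else if (pass > 0) break;", which avoids scanning the remainder of the range after the passing eye has ended.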
diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c
new file mode 100644
index 000000000000..8d75a47444f5
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2600.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "ast_drv.h"
+#include "ast_post.h"
+
+/*
+ * POST
+ */
+
+int ast_2600_post(struct ast_device *ast)
+{
+ ast_2300_set_def_ext_reg(ast);
+
+ if (ast->tx_chip == AST_TX_ASTDP)
+ return ast_dp_launch(ast);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
deleted file mode 100644
index 1e9ac9d6d26c..000000000000
--- a/drivers/gpu/drm/ast/ast_dram_tables.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AST_DRAM_TABLES_H
-#define AST_DRAM_TABLES_H
-
-/* DRAM timing tables */
-struct ast_dramstruct {
- u16 index;
- u32 data;
-};
-
-static const struct ast_dramstruct ast2000_dram_table_data[] = {
- { 0x0108, 0x00000000 },
- { 0x0120, 0x00004a21 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xFFFFFFFF },
- { 0x0004, 0x00000089 },
- { 0x0008, 0x22331353 },
- { 0x000C, 0x0d07000b },
- { 0x0010, 0x11113333 },
- { 0x0020, 0x00110350 },
- { 0x0028, 0x1e0828f0 },
- { 0x0024, 0x00000001 },
- { 0x001C, 0x00000000 },
- { 0x0014, 0x00000003 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000131 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0018, 0x00000031 },
- { 0x0014, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x0028, 0x1e0828f1 },
- { 0x0024, 0x00000003 },
- { 0x002C, 0x1f0f28fb },
- { 0x0030, 0xFFFFFE01 },
- { 0xFFFF, 0xFFFFFFFF }
-};
-
-static const struct ast_dramstruct ast1100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x000041f0 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00050000 },
- { 0x0004, 0x00000585 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x22201724 },
- { 0x0018, 0x1e29011a },
- { 0x0020, 0x00c82222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x032aa02a },
- { 0x0064, 0x002d3000 },
- { 0x0068, 0x00000000 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000732 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000632 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00004c41 },
- { 0xffff, 0xffffffff },
-};
-
-static const struct ast_dramstruct ast2100_dram_table_data[] = {
- { 0x2000, 0x1688a8a8 },
- { 0x2020, 0x00004120 },
- { 0xFF00, 0x00000043 },
- { 0x0000, 0xfc600309 },
- { 0x006C, 0x00909090 },
- { 0x0064, 0x00070000 },
- { 0x0004, 0x00000489 },
- { 0x0008, 0x0011030f },
- { 0x0010, 0x32302926 },
- { 0x0018, 0x274c0122 },
- { 0x0020, 0x00ce2222 },
- { 0x0014, 0x01001523 },
- { 0x001C, 0x1024010d },
- { 0x0024, 0x00cb2522 },
- { 0x0038, 0xffffff82 },
- { 0x003C, 0x00000000 },
- { 0x0040, 0x00000000 },
- { 0x0044, 0x00000000 },
- { 0x0048, 0x00000000 },
- { 0x004C, 0x00000000 },
- { 0x0050, 0x00000000 },
- { 0x0054, 0x00000000 },
- { 0x0058, 0x00000000 },
- { 0x005C, 0x00000000 },
- { 0x0060, 0x0f2aa02a },
- { 0x0064, 0x003f3005 },
- { 0x0068, 0x02020202 },
- { 0x0070, 0x00000000 },
- { 0x0074, 0x00000000 },
- { 0x0078, 0x00000000 },
- { 0x007C, 0x00000000 },
- { 0x0034, 0x00000001 },
- { 0xFF00, 0x00000043 },
- { 0x002C, 0x00000942 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000005 },
- { 0x0028, 0x00000007 },
- { 0x0028, 0x00000003 },
- { 0x0028, 0x00000001 },
- { 0x000C, 0x00005a08 },
- { 0x002C, 0x00000842 },
- { 0x0028, 0x00000001 },
- { 0x0030, 0x000003c0 },
- { 0x0028, 0x00000003 },
- { 0x0030, 0x00000040 },
- { 0x0028, 0x00000003 },
- { 0x000C, 0x00005a21 },
- { 0x0034, 0x00007c03 },
- { 0x0120, 0x00005061 },
- { 0xffff, 0xffffffff },
-};
-
-/*
- * AST2500 DRAM settings modules
- */
-#define REGTBL_NUM 17
-#define REGIDX_010 0
-#define REGIDX_014 1
-#define REGIDX_018 2
-#define REGIDX_020 3
-#define REGIDX_024 4
-#define REGIDX_02C 5
-#define REGIDX_030 6
-#define REGIDX_214 7
-#define REGIDX_2E0 8
-#define REGIDX_2E4 9
-#define REGIDX_2E8 10
-#define REGIDX_2EC 11
-#define REGIDX_2F0 12
-#define REGIDX_2F4 13
-#define REGIDX_2F8 14
-#define REGIDX_RFC 15
-#define REGIDX_PLL 16
-
-static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
- 0x64604D38, /* 0x010 */
- 0x29690599, /* 0x014 */
- 0x00000300, /* 0x018 */
- 0x00000000, /* 0x020 */
- 0x00000000, /* 0x024 */
- 0x02181E70, /* 0x02C */
- 0x00000040, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x02001300, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001B, /* 0x2E8 */
- 0x35B8C105, /* 0x2EC */
- 0x08090408, /* 0x2F0 */
- 0x9B000800, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x9971452F, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
- 0x63604E37, /* 0x010 */
- 0xE97AFA99, /* 0x014 */
- 0x00019000, /* 0x018 */
- 0x08000000, /* 0x020 */
- 0x00000400, /* 0x024 */
- 0x00000410, /* 0x02C */
- 0x00000101, /* 0x030 */
- 0x00000024, /* 0x214 */
- 0x03002900, /* 0x2E0 */
- 0x0E0000A0, /* 0x2E4 */
- 0x000E001C, /* 0x2E8 */
- 0x35B8C106, /* 0x2EC */
- 0x08080607, /* 0x2F0 */
- 0x9B000900, /* 0x2F4 */
- 0x0E400A00, /* 0x2F8 */
- 0x99714545, /* tRFC */
- 0x000071C1 /* PLL */
-};
-
-#endif
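
The header removed above drove the AST2000/AST1100/AST2100 POST through a sentinel-terminated register table: an index of 0xff00 encodes a delay step and 0xffff ends the table, consumed by a simple loop (ast_init_dram_reg() in the old ast_post.c). Here is a minimal sketch under those conventions; write_reg() and the table contents are illustrative stand-ins for ast_write32() and the real timing data.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the removed struct ast_dramstruct. */
struct dram_reg {
	uint16_t index;
	uint32_t data;
};

/* 0xff00 = delay pseudo-entry, 0xffff = terminator, as in the old tables. */
static const struct dram_reg table[] = {
	{ 0x0108, 0x00000000 },
	{ 0xFF00, 0x00000043 },	/* delay step */
	{ 0xFFFF, 0xFFFFFFFF },	/* end of table */
};

/* Hypothetical register write; the driver uses ast_write32(). */
static void write_reg(uint32_t addr, uint32_t val)
{
	printf("write 0x%05" PRIx32 " = 0x%08" PRIx32 "\n", addr, val);
}

static void apply_table(const struct dram_reg *reg)
{
	while (reg->index != 0xffff) {
		if (reg->index == 0xff00)
			printf("delay %" PRIu32 " us\n", reg->data);	/* stand-in for udelay() */
		else
			write_reg(0x10000 + reg->index, reg->data);
		reg++;
	}
}

int main(void)
{
	apply_table(table);
	return 0;
}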
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6fbf62a99c48..473faa92d08c 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -171,7 +171,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(regs);
+ ast_2500_patch_ahb(regs);
}
/* Double check that it's actually working */
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 2ee402096cd9..e37a55295ed7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -417,11 +417,26 @@ struct ast_crtc_state {
int ast_mm_init(struct ast_device *ast);
+/* ast_2000.c */
+int ast_2000_post(struct ast_device *ast);
+
+/* ast_2100.c */
+int ast_2100_post(struct ast_device *ast);
+
+/* ast_2300.c */
+int ast_2300_post(struct ast_device *ast);
+
+/* ast_2500.c */
+void ast_2500_patch_ahb(void __iomem *regs);
+int ast_2500_post(struct ast_device *ast);
+
+/* ast_2600.c */
+int ast_2600_post(struct ast_device *ast);
+
/* ast post */
int ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(void __iomem *regs);
int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 031980d8f3ab..b4e8edc7c767 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -33,6 +33,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
@@ -70,31 +71,44 @@ static unsigned long ast_fb_vram_size(struct ast_device *ast)
return cursor_offset - offset;
}
-static inline void ast_load_palette_index(struct ast_device *ast,
- u8 index, u8 red, u8 green,
- u8 blue)
+static void ast_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- ast_io_write8(ast, AST_IO_VGADWR, index);
+ struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ ast_io_write8(ast, AST_IO_VGADWR, i8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, red);
+ ast_io_write8(ast, AST_IO_VGAPDR, r8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, green);
+ ast_io_write8(ast, AST_IO_VGAPDR, g8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, blue);
+ ast_io_write8(ast, AST_IO_VGAPDR, b8);
ast_io_read8(ast, AST_IO_VGASRI);
}
-static void ast_crtc_set_gamma_linear(struct ast_device *ast,
- const struct drm_format_info *format)
+static void ast_crtc_fill_gamma(struct ast_device *ast,
+ const struct drm_format_info *format)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_fill_palette_8(crtc, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* RGB565 also uses the 8-bit gamma ramp in low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -103,21 +117,22 @@ static void ast_crtc_set_gamma_linear(struct ast_device *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_device *ast,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ast_crtc_load_gamma(struct ast_device *ast,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_load_palette_8(crtc, lut, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* RGB565 also uses the 8-bit gamma ramp in low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i,
- lut[i].red >> 8,
- lut[i].green >> 8,
- lut[i].blue >> 8);
+ drm_crtc_load_gamma_888(crtc, lut, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -810,11 +825,11 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
- ast_crtc_set_gamma(ast,
- ast_crtc_state->format,
- crtc_state->gamma_lut->data);
+ ast_crtc_load_gamma(ast,
+ ast_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
+ ast_crtc_fill_gamma(ast, ast_crtc_state->format);
}
}
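
The gamma rework above routes both palette and gamma updates through ast_set_gamma_lut(), which reduces DRM's 16-bit-per-channel LUT entries to the 8-bit VGA RAMDAC by keeping the high byte of each channel. A small sketch of just that conversion, using a local struct that mirrors the channel layout of struct drm_color_lut:

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct drm_color_lut: 16 bits per channel, 0xffff = full scale. */
struct color_lut {
	uint16_t red, green, blue, reserved;
};

/* Keep the top 8 bits, as ast_set_gamma_lut() does with "red >> 8" etc. */
static uint8_t to_ramdac(uint16_t c16)
{
	return c16 >> 8;
}

int main(void)
{
	struct color_lut entry = { 0xffff, 0x8000, 0x0000, 0 };

	printf("RAMDAC RGB = %02x %02x %02x\n",
	       to_ramdac(entry.red), to_ramdac(entry.green),
	       to_ramdac(entry.blue));
	return 0;
}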
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 37568cf3822c..b72914dbed38 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -31,51 +31,10 @@
#include <drm/drm_print.h>
-#include "ast_dram_tables.h"
#include "ast_drv.h"
+#include "ast_post.h"
-static void ast_post_chip_2300(struct ast_device *ast);
-static void ast_post_chip_2500(struct ast_device *ast);
-
-static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
-static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
-
-static void ast_set_def_ext_reg(struct ast_device *ast)
-{
- u8 i, index, reg;
- const u8 *ext_reg_info;
-
- /* reset scratch */
- for (i = 0x81; i <= 0x9f; i++)
- ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- ext_reg_info = extreginfo_ast2300;
- else
- ext_reg_info = extreginfo;
-
- index = 0xa0;
- while (*ext_reg_info != 0xff) {
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
- index++;
- ext_reg_info++;
- }
-
- /* disable standard IO/MEM decode if secondary */
- /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
-
- /* Set Ext. Default */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
-
- /* Enable RAMDAC for A1 */
- reg = 0x04;
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
- reg |= 0x20;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
-}
-
-static u32 __ast_mindwm(void __iomem *regs, u32 r)
+u32 __ast_mindwm(void __iomem *regs, u32 r)
{
u32 data;
@@ -89,7 +48,7 @@ static u32 __ast_mindwm(void __iomem *regs, u32 r)
return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
u32 data;
@@ -113,332 +72,38 @@ void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
__ast_moutdwm(ast->regs, r, v);
}
-/*
- * AST2100/2150 DLL CBR Setting
- */
-#define CBR_SIZE_AST2150 ((16 << 10) - 1)
-#define CBR_PASSNUM_AST2150 5
-#define CBR_THRESHOLD_AST2150 10
-#define CBR_THRESHOLD2_AST2150 10
-#define TIMEOUT_AST2150 5000000
-
-#define CBR_PATNUM_AST2150 8
-
-static const u32 pattern_AST2150[14] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0xFFFE0001,
- 0x683501FE,
- 0x0F1929B0,
- 0x2D0B4346,
- 0x60767F02,
- 0x6FBE36A6,
- 0x3A253035,
- 0x3019686D,
- 0x41C6167E,
- 0x620152BF,
- 0x20F050E0
-};
-
-static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
- if (++timeout > TIMEOUT_AST2150) {
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return 0xffffffff;
- }
- } while (!data);
- data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-#endif
-
-static int cbrtest_ast2150(struct ast_device *ast)
-{
- int i;
-
- for (i = 0; i < 8; i++)
- if (mmctestburst2_ast2150(ast, i))
- return 0;
- return 1;
-}
-
-static int cbrscan_ast2150(struct ast_device *ast, int busw)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
- if (cbrtest_ast2150(ast))
- break;
- }
- if (loop == CBR_PASSNUM_AST2150)
- return 0;
- }
- return 1;
-}
-
-
-static void cbrdlli_ast2150(struct ast_device *ast, int busw)
-{
- u32 dll_min[4], dll_max[4], dlli, data, passcnt;
-
-cbr_start:
- dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
- dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
- passcnt = 0;
-
- for (dlli = 0; dlli < 100; dlli++) {
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
- data = cbrscan_ast2150(ast, busw);
- if (data != 0) {
- if (data & 0x1) {
- if (dll_min[0] > dlli)
- dll_min[0] = dlli;
- if (dll_max[0] < dlli)
- dll_max[0] = dlli;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD_AST2150)
- goto cbr_start;
- }
- if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
- goto cbr_start;
-
- dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
- ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
-}
-
-
-
-static void ast_init_dram_reg(struct ast_device *ast)
-{
- u8 j;
- u32 data, temp, i;
- const struct ast_dramstruct *dram_reg_info;
-
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-
- if ((j & 0x80) == 0) { /* VGA only */
- if (IS_AST_GEN1(ast)) {
- dram_reg_info = ast2000_dram_table_data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x10100, 0xa8);
-
- do {
- ;
- } while (ast_read32(ast, 0x10100) != 0xa8);
- } else { /* GEN2/GEN3 */
- if (ast->chip == AST2100 || ast->chip == AST2200)
- dram_reg_info = ast2100_dram_table_data;
- else
- dram_reg_info = ast1100_dram_table_data;
-
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688A8A8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x01);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x01);
- }
-
- while (dram_reg_info->index != 0xffff) {
- if (dram_reg_info->index == 0xff00) {/* delay fn */
- for (i = 0; i < 15; i++)
- udelay(dram_reg_info->data);
- } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
- data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
- data = 0x00000c8d;
-
- temp = ast_read32(ast, 0x12070);
- temp &= 0xc;
- temp <<= 2;
- ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
- } else
- ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
- dram_reg_info++;
- }
-
- /* AST 2100/2150 DRAM calibration */
- data = ast_read32(ast, 0x10120);
- if (data == 0x5061) { /* 266Mhz */
- data = ast_read32(ast, 0x10004);
- if (data & 0x40)
- cbrdlli_ast2150(ast, 16); /* 16 bits */
- else
- cbrdlli_ast2150(ast, 32); /* 32 bits */
- }
-
- switch (AST_GEN(ast)) {
- case 1:
- temp = ast_read32(ast, 0x10140);
- ast_write32(ast, 0x10140, temp | 0x40);
- break;
- case 2:
- case 3:
- temp = ast_read32(ast, 0x1200c);
- ast_write32(ast, 0x1200c, temp & 0xfffffffd);
- temp = ast_read32(ast, 0x12040);
- ast_write32(ast, 0x12040, temp | 0x40);
- break;
- default:
- break;
- }
- }
-
- /* wait ready */
- do {
- j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((j & 0x40) == 0);
-}
-
int ast_post_gpu(struct ast_device *ast)
{
int ret;
- ast_set_def_ext_reg(ast);
-
if (AST_GEN(ast) >= 7) {
- if (ast->tx_chip == AST_TX_ASTDP) {
- ret = ast_dp_launch(ast);
- if (ret)
- return ret;
- }
+ ret = ast_2600_post(ast);
+ if (ret)
+ return ret;
} else if (AST_GEN(ast) >= 6) {
- if (ast->config_mode == ast_use_p2a) {
- ast_post_chip_2500(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2500_post(ast);
+ if (ret)
+ return ret;
} else if (AST_GEN(ast) >= 4) {
- if (ast->config_mode == ast_use_p2a) {
- ast_post_chip_2300(ast);
- ast_init_3rdtx(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2300_post(ast);
+ if (ret)
+ return ret;
+ } else if (AST_GEN(ast) >= 2) {
+ ret = ast_2100_post(ast);
+ if (ret)
+ return ret;
} else {
- if (ast->config_mode == ast_use_p2a) {
- ast_init_dram_reg(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164) {
- /* Enable DVO */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
- }
- }
+ ret = ast_2000_post(ast);
+ if (ret)
+ return ret;
}
return 0;
}
-/* AST 2300 DRAM settings */
-#define AST_DDR3 0
-#define AST_DDR2 1
-
-struct ast2300_dram_param {
- u32 dram_type;
- u32 dram_chipid;
- u32 dram_freq;
- u32 vram_size;
- u32 odt;
- u32 wodt;
- u32 rodt;
- u32 dram_config;
- u32 reg_PERIOD;
- u32 reg_MADJ;
- u32 reg_SADJ;
- u32 reg_MRS;
- u32 reg_EMRS;
- u32 reg_AC1;
- u32 reg_AC2;
- u32 reg_DQSIC;
- u32 reg_DRV;
- u32 reg_IOZ;
- u32 reg_DQIDLY;
- u32 reg_FREQ;
- u32 madj_max;
- u32 dll2_finetune_step;
-};
-
-/*
- * DQSI DLL CBR Setting
- */
-#define CBR_SIZE0 ((1 << 10) - 1)
-#define CBR_SIZE1 ((4 << 10) - 1)
-#define CBR_SIZE2 ((64 << 10) - 1)
-#define CBR_PASSNUM 5
-#define CBR_PASSNUM2 5
-#define CBR_THRESHOLD 10
-#define CBR_THRESHOLD2 10
#define TIMEOUT 5000000
-#define CBR_PATNUM 8
-static const u32 pattern[8] = {
- 0xFF00FF00,
- 0xCC33CC33,
- 0xAA55AA55,
- 0x88778877,
- 0x92CC4D6E,
- 0x543D3CDE,
- 0xF1E843C7,
- 0x7C61D253
-};
-
-static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -458,1657 +123,7 @@ static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
-{
- u32 data, timeout;
-
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
- timeout = 0;
- do {
- data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
- if (++timeout > TIMEOUT) {
- ast_moutdwm(ast, 0x1e6e0070, 0x0);
- return 0xffffffff;
- }
- } while (!data);
- data = ast_mindwm(ast, 0x1e6e0078);
- data = (data | (data >> 16)) & 0xffff;
- ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
- return data;
-}
-
-
-static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
+bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
-
-static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x41);
-}
-
-static bool mmc_test_single(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0xc5);
-}
-
-static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
-{
- return mmc_test2(ast, datagen, 0x05);
-}
-
-static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
-{
- return mmc_test(ast, datagen, 0x85);
-}
-
-static int cbr_test(struct ast_device *ast)
-{
- u32 data;
- int i;
- data = mmc_test_single2(ast, 0);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- for (i = 0; i < 8; i++) {
- data = mmc_test_burst2(ast, i);
- if ((data & 0xff) && (data & 0xff00))
- return 0;
- }
- if (!data)
- return 3;
- else if (data & 0xff)
- return 2;
- return 1;
-}
-
-static int cbr_scan(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 3;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static u32 cbr_test2(struct ast_device *ast)
-{
- u32 data;
-
- data = mmc_test_burst2(ast, 0);
- if (data == 0xffff)
- return 0;
- data |= mmc_test_single2(ast, 0);
- if (data == 0xffff)
- return 0;
-
- return ~data & 0xffff;
-}
-
-static u32 cbr_scan2(struct ast_device *ast)
-{
- u32 data, data2, patcnt, loop;
-
- data2 = 0xffff;
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < CBR_PASSNUM2; loop++) {
- if ((data = cbr_test2(ast)) != 0) {
- data2 &= data;
- if (!data2)
- return 0;
- break;
- }
- }
- if (loop == CBR_PASSNUM2)
- return 0;
- }
- return data2;
-}
-
-static bool cbr_test3(struct ast_device *ast)
-{
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single(ast, 0))
- return false;
- return true;
-}
-
-static bool cbr_scan3(struct ast_device *ast)
-{
- u32 patcnt, loop;
-
- for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
- for (loop = 0; loop < 2; loop++) {
- if (cbr_test3(ast))
- break;
- }
- if (loop == 2)
- return false;
- }
- return true;
-}
-
-static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
- bool status = false;
-FINETUNE_START:
- for (cnt = 0; cnt < 16; cnt++) {
- dllmin[cnt] = 0xff;
- dllmax[cnt] = 0x0;
- }
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
- data = cbr_scan2(ast);
- if (data != 0) {
- mask = 0x00010001;
- for (cnt = 0; cnt < 16; cnt++) {
- if (data & mask) {
- if (dllmin[cnt] > dlli) {
- dllmin[cnt] = dlli;
- }
- if (dllmax[cnt] < dlli) {
- dllmax[cnt] = dlli;
- }
- }
- mask <<= 1;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD2) {
- break;
- }
- }
- gold_sadj[0] = 0x0;
- passcnt = 0;
- for (cnt = 0; cnt < 16; cnt++) {
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- gold_sadj[0] += dllmin[cnt];
- passcnt++;
- }
- }
- if (retry++ > 10)
- goto FINETUNE_DONE;
- if (passcnt != 16) {
- goto FINETUNE_START;
- }
- status = true;
-FINETUNE_DONE:
- gold_sadj[0] = gold_sadj[0] >> 4;
- gold_sadj[1] = gold_sadj[0];
-
- data = 0;
- for (cnt = 0; cnt < 8; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[0] >= dlli) {
- dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- }
- } else {
- dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0080, data);
-
- data = 0;
- for (cnt = 8; cnt < 16; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = dllmin[cnt];
- if (gold_sadj[1] >= dlli) {
- dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
- if (dlli > 3) {
- dlli = 3;
- } else {
- dlli = (dlli - 1) & 0x7;
- }
- } else {
- dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
- dlli += 1;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- ast_moutdwm(ast, 0x1E6E0084, data);
- return status;
-} /* finetuneDQI_L */
-
-static void finetuneDQSI(struct ast_device *ast)
-{
- u32 dlli, dqsip, dqidly;
- u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
- u32 g_dqidly, g_dqsip, g_margin, g_side;
- u16 pass[32][2][2];
- char tag[2][76];
-
- /* Disable DQI CBR */
- reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
- reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
- reg_mcr18 &= 0x0000ffff;
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
- for (dlli = 0; dlli < 76; dlli++) {
- tag[0][dlli] = 0x0;
- tag[1][dlli] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- pass[dqidly][0][0] = 0xff;
- pass[dqidly][0][1] = 0x0;
- pass[dqidly][1][0] = 0xff;
- pass[dqidly][1][1] = 0x0;
- }
- for (dqidly = 0; dqidly < 32; dqidly++) {
- passcnt[0] = passcnt[1] = 0;
- for (dqsip = 0; dqsip < 2; dqsip++) {
- ast_moutdwm(ast, 0x1E6E000C, 0);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
- ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0070, 0);
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
- if (cbr_scan3(ast)) {
- if (dlli == 0)
- break;
- passcnt[dqsip]++;
- tag[dqsip][dlli] = 'P';
- if (dlli < pass[dqidly][dqsip][0])
- pass[dqidly][dqsip][0] = (u16) dlli;
- if (dlli > pass[dqidly][dqsip][1])
- pass[dqidly][dqsip][1] = (u16) dlli;
- } else if (passcnt[dqsip] >= 5)
- break;
- else {
- pass[dqidly][dqsip][0] = 0xff;
- pass[dqidly][dqsip][1] = 0x0;
- }
- }
- }
- if (passcnt[0] == 0 && passcnt[1] == 0)
- dqidly++;
- }
- /* Search margin */
- g_dqidly = g_dqsip = g_margin = g_side = 0;
-
- for (dqidly = 0; dqidly < 32; dqidly++) {
- for (dqsip = 0; dqsip < 2; dqsip++) {
- if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
- continue;
- diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
- if ((diff+2) < g_margin)
- continue;
- passcnt[0] = passcnt[1] = 0;
- for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
- for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
- if (passcnt[0] > passcnt[1])
- passcnt[0] = passcnt[1];
- passcnt[1] = 0;
- if (passcnt[0] > g_side)
- passcnt[1] = passcnt[0] - g_side;
- if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- } else if (passcnt[1] > 1 && g_side < 8) {
- if (diff > g_margin)
- g_margin = diff;
- g_dqidly = dqidly;
- g_dqsip = dqsip;
- g_side = passcnt[0];
- }
- }
- }
- reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
- ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-
-}
-static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
- bool status = false;
-
- finetuneDQSI(ast);
- if (finetuneDQI_L(ast, param) == false)
- return status;
-
-CBR_START2:
- dllmin[0] = dllmin[1] = 0xff;
- dllmax[0] = dllmax[1] = 0x0;
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
- data = cbr_scan(ast);
- if (data != 0) {
- if (data & 0x1) {
- if (dllmin[0] > dlli) {
- dllmin[0] = dlli;
- }
- if (dllmax[0] < dlli) {
- dllmax[0] = dlli;
- }
- }
- if (data & 0x2) {
- if (dllmin[1] > dlli) {
- dllmin[1] = dlli;
- }
- if (dllmax[1] < dlli) {
- dllmax[1] = dlli;
- }
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD) {
- break;
- }
- }
- if (retry++ > 10)
- goto CBR_DONE2;
- if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
- goto CBR_START2;
- }
- status = true;
-CBR_DONE2:
- dlli = (dllmin[1] + dllmax[1]) >> 1;
- dlli <<= 8;
- dlli += (dllmin[0] + dllmax[0]) >> 1;
- ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
- return status;
-} /* CBRDLL2 */
-
-static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Ger trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = 0x00020000 + (trap << 16);
- trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
- trap_MRS = 0x00000010 + (trap << 4);
- trap_MRS |= ((trap & 0x2) << 18);
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 0;
- param->reg_AC1 = 0x22202725;
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x04001400 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA007613 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA00761C | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA007636 | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->reg_AC1 = 0x33302825;
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x04001600 | trap_MRS;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000023;
- param->reg_DRV = 0x000000FA;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC009617 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC009622 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00963F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xCD44961A;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00081830;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 4;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0270);
- param->wodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xDE44A61D;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x070000BB;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 4;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0290);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302926;
- param->reg_AC2 = 0xEF44B61E;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00081A30;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000088;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302A37;
- param->reg_AC2 = 0xEF56B61E;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 136;
- param->dll2_finetune_step = 3;
- break;
- case 600:
- ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xDF56B61F;
- param->reg_DQSIC = 0x0000014D;
- param->reg_MRS = 0x00101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000023;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000058C0;
- param->madj_max = 132;
- param->dll2_finetune_step = 3;
- break;
- case 624:
- ast_moutdwm(ast, 0x1E6E2020, 0x0160);
- param->reg_MADJ = 0x00136868;
- param->reg_SADJ = 0x00004534;
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x32302A37;
- param->reg_AC2 = 0xEF56B621;
- param->reg_DQSIC = 0x0000015A;
- param->reg_MRS = 0x02101A50;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000078;
- param->reg_FREQ = 0x000059C0;
- param->madj_max = 128;
- param->dll2_finetune_step = 3;
- break;
- } /* switch freq */
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x130;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x131;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x132;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x133;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case SZ_8M:
- param->dram_config |= 0x00;
- break;
- case SZ_16M:
- param->dram_config |= 0x04;
- break;
- case SZ_32M:
- param->dram_config |= 0x08;
- break;
- case SZ_64M:
- param->dram_config |= 0x0c;
- break;
- }
-
-}
-
-static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr3_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
- ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- data = 0;
- if (param->wodt) {
- data = 0x300;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr3_init_start;
-
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-
-}
-
-static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 trap, trap_AC2, trap_MRS;
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
-
- /* Ger trap info */
- trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
- trap_AC2 = (trap << 20) | (trap << 16);
- trap_AC2 += 0x00110000;
- trap_MRS = 0x00000040 | (trap << 4);
-
-
- param->reg_MADJ = 0x00034C4C;
- param->reg_SADJ = 0x00001800;
- param->reg_DRV = 0x000000F0;
- param->reg_PERIOD = param->dram_freq;
- param->rodt = 0;
-
- switch (param->dram_freq) {
- case 264:
- ast_moutdwm(ast, 0x1E6E2020, 0x0130);
- param->wodt = 0;
- param->reg_AC1 = 0x11101513;
- param->reg_AC2 = 0x78117011;
- param->reg_DQSIC = 0x00000092;
- param->reg_MRS = 0x00000842;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x000000F0;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x0000005A;
- param->reg_FREQ = 0x00004AC0;
- param->madj_max = 138;
- param->dll2_finetune_step = 3;
- break;
- case 336:
- ast_moutdwm(ast, 0x1E6E2020, 0x0190);
- param->wodt = 1;
- param->reg_AC1 = 0x22202613;
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- param->reg_DQSIC = 0x000000BA;
- param->reg_MRS = 0x00000A02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000074;
- param->reg_FREQ = 0x00004DC0;
- param->madj_max = 96;
- param->dll2_finetune_step = 3;
- switch (param->dram_chipid) {
- default:
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xAA009012 | trap_AC2;
- break;
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xAA009016 | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xAA009023 | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xAA00903B | trap_AC2;
- break;
- }
- break;
- default:
- case 396:
- ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x00005040;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
-
- case 408:
- ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
- param->wodt = 1;
- param->rodt = 0;
- param->reg_AC1 = 0x33302714;
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- param->reg_DQSIC = 0x000000E2;
- param->reg_MRS = 0x00000C02 | trap_MRS;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x000000FA;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
- param->madj_max = 96;
- param->dll2_finetune_step = 4;
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->reg_AC2 = 0xCC00B016 | trap_AC2;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->reg_AC2 = 0xCC00B01B | trap_AC2;
- break;
- case AST_DRAM_2Gx16:
- param->reg_AC2 = 0xCC00B02B | trap_AC2;
- break;
- case AST_DRAM_4Gx16:
- param->reg_AC2 = 0xCC00B03F | trap_AC2;
- break;
- }
-
- break;
- case 456:
- ast_moutdwm(ast, 0x1E6E2020, 0x0230);
- param->wodt = 0;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xCD44B01E;
- param->reg_DQSIC = 0x000000FC;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000000;
- param->reg_DRV = 0x00000000;
- param->reg_IOZ = 0x00000034;
- param->reg_DQIDLY = 0x00000097;
- param->reg_FREQ = 0x000052C0;
- param->madj_max = 88;
- param->dll2_finetune_step = 3;
- break;
- case 504:
- ast_moutdwm(ast, 0x1E6E2020, 0x0261);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xDE44C022;
- param->reg_DQSIC = 0x00000117;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A0;
- param->reg_FREQ = 0x000054C0;
- param->madj_max = 79;
- param->dll2_finetune_step = 3;
- break;
- case 528:
- ast_moutdwm(ast, 0x1E6E2020, 0x0120);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x33302815;
- param->reg_AC2 = 0xEF44D024;
- param->reg_DQSIC = 0x00000125;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F9;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000A7;
- param->reg_FREQ = 0x000055C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 552:
- ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E025;
- param->reg_DQSIC = 0x00000132;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000040;
- param->reg_DRV = 0x0000000A;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000AD;
- param->reg_FREQ = 0x000056C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- case 576:
- ast_moutdwm(ast, 0x1E6E2020, 0x0140);
- param->wodt = 1;
- param->rodt = 1;
- param->reg_AC1 = 0x43402915;
- param->reg_AC2 = 0xFF44E027;
- param->reg_DQSIC = 0x0000013F;
- param->reg_MRS = 0x00000E72;
- param->reg_EMRS = 0x00000004;
- param->reg_DRV = 0x000000F5;
- param->reg_IOZ = 0x00000045;
- param->reg_DQIDLY = 0x000000B3;
- param->reg_FREQ = 0x000057C0;
- param->madj_max = 76;
- param->dll2_finetune_step = 3;
- break;
- }
-
- switch (param->dram_chipid) {
- case AST_DRAM_512Mx16:
- param->dram_config = 0x100;
- break;
- default:
- case AST_DRAM_1Gx16:
- param->dram_config = 0x121;
- break;
- case AST_DRAM_2Gx16:
- param->dram_config = 0x122;
- break;
- case AST_DRAM_4Gx16:
- param->dram_config = 0x123;
- break;
- } /* switch size */
-
- switch (param->vram_size) {
- default:
- case SZ_8M:
- param->dram_config |= 0x00;
- break;
- case SZ_16M:
- param->dram_config |= 0x04;
- break;
- case SZ_32M:
- param->dram_config |= 0x08;
- break;
- case SZ_64M:
- param->dram_config |= 0x0c;
- break;
- }
-}
-
-static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
-{
- u32 data, data2, retry = 0;
-
-ddr2_init_start:
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
- ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
- udelay(10);
-
- ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
- ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
- ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
- ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
- ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
- ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
- ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0054, 0);
- ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
-
- /* Wait MCLK2X lock to MCLK */
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
- if ((data2 & 0xff) > param->madj_max) {
- break;
- }
- ast_moutdwm(ast, 0x1E6E0064, data2);
- if (data2 & 0x00100000) {
- data2 = ((data2 & 0xff) >> 3) + 3;
- } else {
- data2 = ((data2 & 0xff) >> 2) + 5;
- }
- data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
- data2 += data & 0xff;
- data = data | (data2 << 8);
- ast_moutdwm(ast, 0x1E6E0068, data);
- udelay(10);
- ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
- udelay(10);
- data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- ast_moutdwm(ast, 0x1E6E0018, data);
- data = data | 0x200;
- ast_moutdwm(ast, 0x1E6E0018, data);
- do {
- data = ast_mindwm(ast, 0x1E6E001C);
- } while (!(data & 0x08000000));
-
- data = ast_mindwm(ast, 0x1E6E001C);
- data = (data >> 8) & 0xff;
- }
- ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
- data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
- ast_moutdwm(ast, 0x1E6E0018, data);
-
- ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- udelay(50);
- /* Mode Register Setting */
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
- ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
- ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
-
- ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
- data = 0;
- if (param->wodt) {
- data = 0x500;
- }
- if (param->rodt) {
- data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
- }
- ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
- ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
-
- /* Calibrate the DQSI delay */
- if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
- goto ddr2_init_start;
-
- /* ECC Memory Initialization */
-#ifdef ECC
- ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0070, 0x221);
- do {
- data = ast_mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
-#endif
-
-}
-
-static void ast_post_chip_2300(struct ast_device *ast)
-{
- struct ast2300_dram_param param;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & 0x80) == 0) {/* vga only */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- ast_write32(ast, 0x12000, 0x1688a8a8);
- do {
- ;
- } while (ast_read32(ast, 0x12000) != 0x1);
-
- ast_write32(ast, 0x10000, 0xfc600309);
- do {
- ;
- } while (ast_read32(ast, 0x10000) != 0x1);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- param.dram_freq = 396;
- param.dram_type = AST_DDR3;
- temp = ast_mindwm(ast, 0x1e6e2070);
- if (temp & 0x01000000)
- param.dram_type = AST_DDR2;
- switch (temp & 0x18000000) {
- case 0:
- param.dram_chipid = AST_DRAM_512Mx16;
- break;
- default:
- case 0x08000000:
- param.dram_chipid = AST_DRAM_1Gx16;
- break;
- case 0x10000000:
- param.dram_chipid = AST_DRAM_2Gx16;
- break;
- case 0x18000000:
- param.dram_chipid = AST_DRAM_4Gx16;
- break;
- }
- switch (temp & 0x0c) {
- default:
- case 0x00:
- param.vram_size = SZ_8M;
- break;
-
- case 0x04:
- param.vram_size = SZ_16M;
- break;
-
- case 0x08:
- param.vram_size = SZ_32M;
- break;
-
- case 0x0c:
- param.vram_size = SZ_64M;
- break;
- }
-
- if (param.dram_type == AST_DDR3) {
- get_ddr3_info(ast, &param);
- ddr3_init(ast, &param);
- } else {
- get_ddr2_info(ast, &param);
- ddr2_init(ast, &param);
- }
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
-
-static bool cbr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static bool ddr_test_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
- ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
- if (!mmc_test_burst(ast, 0))
- return false;
- if (!mmc_test_burst(ast, 1))
- return false;
- if (!mmc_test_burst(ast, 2))
- return false;
- if (!mmc_test_burst(ast, 3))
- return false;
- if (!mmc_test_single_2500(ast, 0))
- return false;
- return true;
-}
-
-static void ddr_init_common_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
- ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
- ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
- ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
- ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
- ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
- ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
- ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
- ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
- ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
- ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
-}
-
-static void ddr_phy_init_2500(struct ast_device *ast)
-{
- u32 data, pass, timecnt;
-
- pass = 0;
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- while (!pass) {
- for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
- data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
- if (!data)
- break;
- }
- if (timecnt != TIMEOUT) {
- data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
- if (!data)
- pass = 1;
- }
- if (!pass) {
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- udelay(10); /* delay 10 us */
- ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
- }
- }
-
- ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
-}
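
The PHY init above is a fire/poll/retry loop: DFI init is kicked by writing 0x5 to 0x1E6E0060, bit 0 is polled until it clears (bounded by TIMEOUT), the error bits in 0x1E6E0300 are then checked, and on any failure the engine is stopped and re-fired. A minimal sketch of the same pattern, with hypothetical mmio_read32()/mmio_write32()/udelay() accessors standing in for the driver's helpers:

    #include <stdbool.h>
    #include <stdint.h>

    #define POLL_TIMEOUT 5000000 /* stand-in for the driver's TIMEOUT */

    extern uint32_t mmio_read32(uint32_t reg);          /* hypothetical accessor */
    extern void mmio_write32(uint32_t reg, uint32_t v); /* hypothetical accessor */
    extern void udelay(unsigned int usecs);

    static void dfi_init_until_pass(uint32_t ctrl_reg, uint32_t status_reg)
    {
        uint32_t timecnt;
        bool pass = false;

        mmio_write32(ctrl_reg, 0x5); /* fire DFI init */
        while (!pass) {
            for (timecnt = 0; timecnt < POLL_TIMEOUT; timecnt++) {
                if (!(mmio_read32(ctrl_reg) & 0x1))
                    break; /* init sequence finished */
            }
            /* finished in time and no error bits latched: success */
            if (timecnt != POLL_TIMEOUT &&
                !(mmio_read32(status_reg) & 0x000A0000))
                pass = true;
            if (!pass) {
                mmio_write32(ctrl_reg, 0x0); /* stop ... */
                udelay(10);
                mmio_write32(ctrl_reg, 0x5); /* ... and re-fire */
            }
        }
    }

Like the original, the sketch retries forever on persistent failure; an outer retry bound would be a reasonable hardening step.
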
-
-/*
- * Check DRAM Size
- * 1Gb : 0x80000000 ~ 0x87FFFFFF
- * 2Gb : 0x80000000 ~ 0x8FFFFFFF
- * 4Gb : 0x80000000 ~ 0x9FFFFFFF
- * 8Gb : 0x80000000 ~ 0xBFFFFFFF
- */
-static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
-{
- u32 reg_04, reg_14;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
- reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;
-
- ast_moutdwm(ast, 0xA0100000, 0x41424344);
- ast_moutdwm(ast, 0x90100000, 0x35363738);
- ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
- ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);
-
- /* Check 8Gbit */
- if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
- reg_04 |= 0x03;
- reg_14 |= (tRFC >> 24) & 0xFF;
- /* Check 4Gbit */
- } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
- reg_04 |= 0x02;
- reg_14 |= (tRFC >> 16) & 0xFF;
- /* Check 2Gbit */
- } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
- reg_04 |= 0x01;
- reg_14 |= (tRFC >> 8) & 0xFF;
- } else {
- reg_14 |= tRFC & 0xFF;
- }
- ast_moutdwm(ast, 0x1E6E0004, reg_04);
- ast_moutdwm(ast, 0x1E6E0014, reg_14);
-}
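
check_dram_size_2500() is an address-alias probe: distinct patterns are written from the highest candidate address downwards, and the chip size is identified by which pattern survives, because on a smaller part the high addresses wrap around and the later, lower writes overwrite them. The same idea in isolation, again with hypothetical mmio accessors:

    #include <stdint.h>

    extern uint32_t mmio_read32(uint32_t reg);
    extern void mmio_write32(uint32_t reg, uint32_t v);

    static int probe_dram_gbits(void)
    {
        mmio_write32(0xA0100000, 0x41424344); /* only distinct on 8Gbit */
        mmio_write32(0x90100000, 0x35363738); /* only distinct on >= 4Gbit */
        mmio_write32(0x88100000, 0x292A2B2C); /* only distinct on >= 2Gbit */
        mmio_write32(0x80100000, 0x1D1E1F10); /* always mapped */

        /* On smaller parts the high writes alias and were overwritten. */
        if (mmio_read32(0xA0100000) == 0x41424344)
            return 8;
        if (mmio_read32(0x90100000) == 0x35363738)
            return 4;
        if (mmio_read32(0x88100000) == 0x292A2B2C)
            return 2;
        return 1;
    }
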
-
-static void enable_cache_2500(struct ast_device *ast)
-{
- u32 reg_04, data;
-
- reg_04 = ast_mindwm(ast, 0x1E6E0004);
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);
-
- do
- data = ast_mindwm(ast, 0x1E6E0004);
- while (!(data & 0x80000));
- ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
-}
-
-static void set_mpll_2500(struct ast_device *ast)
-{
- u32 addr, data, param;
-
- /* Reset MMC */
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
- ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
- for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
- ast_moutdwm(ast, addr, 0x0);
- addr += 4;
- }
- ast_moutdwm(ast, 0x1E6E0034, 0x00020000);
-
- ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
- if (data) {
- /* CLKIN = 25MHz */
- param = 0x930023E0;
- ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
- } else {
- /* CLKIN = 24MHz */
- param = 0x93002400;
- }
- ast_moutdwm(ast, 0x1E6E2020, param);
- udelay(100);
-}
-
-static void reset_mmc_2500(struct ast_device *ast)
-{
- ast_moutdwm(ast, 0x1E78505C, 0x00000004);
- ast_moutdwm(ast, 0x1E785044, 0x00000001);
- ast_moutdwm(ast, 0x1E785048, 0x00004755);
- ast_moutdwm(ast, 0x1E78504C, 0x00000013);
- mdelay(100);
- ast_moutdwm(ast, 0x1E785054, 0x00000077);
- ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
-}
-
-static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x00020091);
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
-
-static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
-{
- u32 data, data2, pass, retrycnt;
- u32 ddr_vref, phy_vref;
- u32 min_ddr_vref = 0, min_phy_vref = 0;
- u32 max_ddr_vref = 0, max_phy_vref = 0;
-
- ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
- ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
- ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
- ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
- ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */
- ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */
- ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
- ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */
-
- /* DDR PHY Setting */
- ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
- ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
- ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
- ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
- ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
- ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
- ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
- ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
- ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
- ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
- ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
- ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
- ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
- ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
- ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);
-
- /* Controller Setting */
- ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);
-
- /* Train PHY Vref first */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- max_phy_vref = 0x0;
- pass = 0;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
- for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- data = ast_mindwm(ast, 0x1E6E03D0);
- data2 = data >> 8;
- data = data & 0xff;
- if (data > data2)
- data = data2;
- if (max_phy_vref < data) {
- max_phy_vref = data;
- min_phy_vref = phy_vref;
- }
- } else if (pass > 0)
- break;
- }
- }
- ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));
-
- /* Train DDR Vref next */
- pass = 0;
-
- for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
- min_ddr_vref = 0xFF;
- max_ddr_vref = 0x0;
- pass = 0;
- for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
- /* Fire DFI Init */
- ddr_phy_init_2500(ast);
- ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
- if (cbr_test_2500(ast)) {
- pass++;
- if (min_ddr_vref > ddr_vref)
- min_ddr_vref = ddr_vref;
- if (max_ddr_vref < ddr_vref)
- max_ddr_vref = ddr_vref;
- } else if (pass != 0)
- break;
- }
- }
-
- ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
- ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
- ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
- ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
-
- /* Wait DDR PHY init done */
- ddr_phy_init_2500(ast);
-
- ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
- ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
- ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);
-
- check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
- enable_cache_2500(ast);
- ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
- ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
-}
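
Both Vref sweeps above share one shape: step the reference voltage across its range, run the CBR calibration test at each step, track the passing window, and stop at the first failure after a pass, since the window is contiguous. The DDR Vref is then programmed to the midpoint of that window. A sketch of the search, where cal_test() stands in for cbr_test_2500() and apply_vref() is a hypothetical setter; like the retry loop above, it assumes at least one setting passes:

    #include <stdbool.h>
    #include <stdint.h>

    extern bool cal_test(void);            /* stands in for cbr_test_2500() */
    extern void apply_vref(uint32_t vref); /* hypothetical setter */

    static uint32_t train_vref(uint32_t lo, uint32_t hi)
    {
        uint32_t vref, min_pass = hi, max_pass = lo;
        bool seen_pass = false;

        for (vref = lo; vref < hi; vref++) {
            apply_vref(vref);
            if (cal_test()) {
                seen_pass = true;
                if (vref < min_pass)
                    min_pass = vref;
                if (vref > max_pass)
                    max_pass = vref;
            } else if (seen_pass) {
                break; /* window is contiguous: first fail after a pass ends it */
            }
        }

        return (min_pass + max_pass + 1) >> 1; /* centre of the passing window */
    }
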
-
-static bool ast_dram_init_2500(struct ast_device *ast)
-{
- u32 data;
- u32 max_tries = 5;
-
- do {
- if (max_tries-- == 0)
- return false;
- set_mpll_2500(ast);
- reset_mmc_2500(ast);
- ddr_init_common_2500(ast);
-
- data = ast_mindwm(ast, 0x1E6E2070);
- if (data & 0x01000000)
- ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
- else
- ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
- } while (!ddr_test_2500(ast));
-
- ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);
-
- /* Patch code */
- data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
- ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);
-
- return true;
-}
-
-void ast_patch_ahb_2500(void __iomem *regs)
-{
- u32 data;
-
- /* Clear bus lock condition */
- __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
- __ast_moutdwm(regs, 0x1e600084, 0x00010000);
- __ast_moutdwm(regs, 0x1e600088, 0x00000000);
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
-
- data = __ast_mindwm(regs, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
- /*
-		 * If "Fast reset" is enabled for the ARM-ICE debugger,
-		 * then the WDT needs to be enabled, where
-		 * WDT04 is the WDT#1 Reload reg,
-		 * WDT08 is the WDT#1 counter restart reg (restarted to avoid a system deadlock), and
-		 * WDT0C is the WDT#1 control reg:
-		 *	[6:5]:= 01:Full chip
-		 *	[4]:= 1:1MHz clock source
-		 *	[1]:= 1:WDT will be cleared and disabled after timeout occurs
-		 *	[0]:= 1:WDT enable
- */
- __ast_moutdwm(regs, 0x1E785004, 0x00000010);
- __ast_moutdwm(regs, 0x1E785008, 0x00004755);
- __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
- udelay(1000);
- }
-
- do {
- __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
- data = __ast_mindwm(regs, 0x1e6e2000);
- } while (data != 1);
-
- __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
-}
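
The final loop above is the SCU unlock handshake: 0x1688A8A8 is the protection key, and it is rewritten until the register reads back 1, i.e. until the unlock has actually taken effect. In isolation, with the same hypothetical accessors:

    #include <stdint.h>

    #define SCU_BASE       0x1e6e2000
    #define SCU_UNLOCK_KEY 0x1688A8A8

    extern uint32_t mmio_read32(uint32_t reg);
    extern void mmio_write32(uint32_t reg, uint32_t v);

    static void scu_unlock(void)
    {
        do {
            mmio_write32(SCU_BASE, SCU_UNLOCK_KEY);
        } while (mmio_read32(SCU_BASE) != 1); /* 1 == unlocked */
    }
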
-
-void ast_post_chip_2500(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- u32 temp;
- u8 reg;
-
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
- /* Clear bus lock condition */
- ast_patch_ahb_2500(ast->regs);
-
- /* Disable watchdog */
- ast_moutdwm(ast, 0x1E78502C, 0x00000000);
- ast_moutdwm(ast, 0x1E78504C, 0x00000000);
-
- /*
-		 * Reset the USB port to work around the USB unknown-device issue.
-		 * SCU90 is Multi-function Pin Control #5
-		 * [29]:= 1:Enable USB2.0 Host port#1 (the port mutually shared with
-		 * the USB2.0 Hub).
-		 * SCU94 is Multi-function Pin Control #6
-		 * [14:13]:= 1x:USB2.0 Host2 controller
-		 * SCU70 is the Hardware Strap reg
-		 * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by
-		 * [18]: 0(24)/1(48) MHz)
-		 * SCU7C is the write-clear reg for SCU70
-		 * [23]:= write 1 and SCU70[23] will be cleared to 0b.
- */
- ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
- ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
- if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
- ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
- mdelay(100);
- ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
- }
- /* Modify eSPI reset pin */
- temp = ast_mindwm(ast, 0x1E6E2070);
- if (temp & 0x02000000)
- ast_moutdwm(ast, 0x1E6E207C, 0x00004000);
-
- /* Slow down CPU/AHB CLK in VGA only mode */
- temp = ast_read32(ast, 0x12008);
- temp |= 0x73;
- ast_write32(ast, 0x12008, temp);
-
- if (!ast_dram_init_2500(ast))
- drm_err(dev, "DRAM init failed !\n");
-
- temp = ast_mindwm(ast, 0x1e6e2040);
- ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
- }
-
- /* wait ready */
- do {
- reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- } while ((reg & 0x40) == 0);
-}
diff --git a/drivers/gpu/drm/ast/ast_post.h b/drivers/gpu/drm/ast/ast_post.h
new file mode 100644
index 000000000000..aa5d247bebe8
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef AST_POST_H
+#define AST_POST_H
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct ast_device;
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+/* hardware fields */
+#define __AST_DRAMSTRUCT_DRAM_TYPE 0x0004
+
+/* control commands */
+#define __AST_DRAMSTRUCT_UDELAY 0xff00
+#define __AST_DRAMSTRUCT_INVALID 0xffff
+
+#define __AST_DRAMSTRUCT_INDEX(_name) \
+ (__AST_DRAMSTRUCT_ ## _name)
+
+#define AST_DRAMSTRUCT_INIT(_name, _value) \
+ { __AST_DRAMSTRUCT_INDEX(_name), (_value) }
+
+#define AST_DRAMSTRUCT_UDELAY(_usecs) \
+ AST_DRAMSTRUCT_INIT(UDELAY, _usecs)
+#define AST_DRAMSTRUCT_INVALID \
+ AST_DRAMSTRUCT_INIT(INVALID, U32_MAX)
+
+#define AST_DRAMSTRUCT_IS(_entry, _name) \
+ ((_entry)->index == __AST_DRAMSTRUCT_INDEX(_name))
+
+u32 __ast_mindwm(void __iomem *regs, u32 r);
+void __ast_moutdwm(void __iomem *regs, u32 r, u32 v);
+
+bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl);
+bool mmc_test_burst(struct ast_device *ast, u32 datagen);
+
+/* ast_2000.c */
+void ast_2000_set_def_ext_reg(struct ast_device *ast);
+
+/* ast_2300.c */
+void ast_2300_set_def_ext_reg(struct ast_device *ast);
+
+#endif
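
The macros in this new header encode DRAM init tables as (index, data) pairs in which reserved index values act as control commands: 0xff00 requests a delay and 0xffff terminates the table. A sketch of how such a table might be built and walked, assuming this header plus the usual kernel context; the table values are illustrative rather than real timings, and treating the index as an offset into the MMC register block is an assumption:

    static const struct ast_dramstruct example_table[] = {
        AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585),
        { 0x0010, 0x32302926 },  /* plain (index, data) register write */
        AST_DRAMSTRUCT_UDELAY(67),
        AST_DRAMSTRUCT_INVALID,  /* terminator */
    };

    static void walk_table(struct ast_device *ast, const struct ast_dramstruct *e)
    {
        for (; !AST_DRAMSTRUCT_IS(e, INVALID); e++) {
            if (AST_DRAMSTRUCT_IS(e, UDELAY))
                udelay(e->data);
            else /* assumed: index is an offset into the MMC register block */
                ast_moutdwm(ast, 0x1e6e0000 + e->index, e->data);
        }
    }
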
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index f46a5e26b5dd..59a5256ce8a6 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -5,6 +5,9 @@ config DRM_I2C_ADV7511
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
@@ -19,7 +22,7 @@ config DRM_I2C_ADV7511_AUDIO
config DRM_I2C_ADV7511_CEC
bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
- select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
default y
help
When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index ec0b7f3d889c..85ebead9809c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -313,16 +313,11 @@ enum adv7511_csc_scaling {
* @csc_enable: Whether to enable color space conversion
* @csc_scaling_factor: Color space conversion scaling factor
* @csc_coefficents: Color space conversion coefficents
- * @hdmi_mode: Whether to use HDMI or DVI output mode
- * @avi_infoframe: HDMI infoframe
*/
struct adv7511_video_config {
bool csc_enable;
enum adv7511_csc_scaling csc_scaling_factor;
const uint16_t *csc_coefficents;
-
- bool hdmi_mode;
- struct hdmi_avi_infoframe avi_infoframe;
};
enum adv7511_type {
@@ -337,6 +332,7 @@ struct adv7511_chip_info {
enum adv7511_type type;
unsigned int max_mode_clock_khz;
unsigned int max_lane_freq_khz;
+ const char *name;
const char * const *supply_names;
unsigned int num_supplies;
unsigned int reg_cec_offset;
@@ -371,7 +367,7 @@ struct adv7511 {
struct work_struct hpd_work;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_connector *cec_connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -389,9 +385,7 @@ struct adv7511 {
bool use_timing_gen;
const struct adv7511_chip_info *info;
- struct platform_device *audio_pdev;
- struct cec_adapter *cec_adap;
u8 cec_addr[ADV7511_MAX_ADDRS];
u8 cec_valid_addrs;
bool cec_enabled_adap;
@@ -399,20 +393,29 @@ struct adv7511 {
u32 cec_clk_freq;
};
+static inline struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable);
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr);
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
-static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
-{
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
- return 0;
-}
+#define adv7511_cec_init NULL
+#define adv7511_cec_enable NULL
+#define adv7511_cec_log_addr NULL
+#define adv7511_cec_transmit NULL
#endif
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv);
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
@@ -421,16 +424,18 @@ int adv7533_attach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_audio_exit(struct adv7511 *adv7511);
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
-static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- return 0;
-}
-static inline void adv7511_audio_exit(struct adv7511 *adv7511)
-{
-}
+#define adv7511_hdmi_audio_startup NULL
+#define adv7511_hdmi_audio_shutdown NULL
+#define adv7511_hdmi_audio_prepare NULL
#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
#endif /* __DRM_I2C_ADV7511_H__ */
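
Note the stub strategy change here: the old inline no-op wrappers are replaced by #define-to-NULL aliases, so when CONFIG_DRM_I2C_ADV7511_CEC or CONFIG_DRM_I2C_ADV7511_AUDIO is off, the corresponding drm_bridge_funcs slots are plain NULL pointers and the bridge core skips them outright. The funcs initializer (visible in full in the adv7511_drv.c hunk below) then needs no #ifdefs of its own:

    static const struct drm_bridge_funcs adv7511_bridge_funcs = {
        /* ... */
        .hdmi_cec_init     = adv7511_cec_init,     /* NULL when CEC is compiled out */
        .hdmi_cec_enable   = adv7511_cec_enable,
        .hdmi_cec_log_addr = adv7511_cec_log_addr,
        .hdmi_cec_transmit = adv7511_cec_transmit,
    };
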
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 1ff8c815ec79..766b1c96bc88 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -55,11 +55,12 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
-static int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int audio_source, i2s_format = 0;
unsigned int invert_clock;
unsigned int rate;
@@ -167,9 +168,10 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
return 0;
}
-static int audio_startup(struct device *dev, void *data)
+int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
@@ -204,69 +206,12 @@ static int audio_startup(struct device *dev, void *data)
return 0;
}
-static void audio_shutdown(struct device *dev, void *data)
+void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
}
-
-static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint,
- void *data)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
-	 * HDMI sound is expected at reg = <2>;
-	 * it then maps to sound port 0.
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops adv7511_codec_ops = {
- .hw_params = adv7511_hdmi_hw_params,
- .audio_shutdown = audio_shutdown,
- .audio_startup = audio_startup,
- .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
-};
-
-static const struct hdmi_codec_pdata codec_data = {
- .ops = &adv7511_codec_ops,
- .i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
- .max_i2s_channels = 2,
- .i2s = 1,
- .no_i2s_capture = 1,
- .spdif = 1,
- .no_spdif_capture = 1,
-};
-
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- adv7511->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
-}
-
-void adv7511_audio_exit(struct adv7511 *adv7511)
-{
- if (adv7511->audio_pdev) {
- platform_device_unregister(adv7511->audio_pdev);
- adv7511->audio_pdev = NULL;
- }
-}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 2e9c88a2b5ed..8ecbc25dc647 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -12,6 +12,8 @@
#include <media/cec.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
#include "adv7511.h"
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
@@ -44,8 +46,8 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
return;
if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
- cec_transmit_attempt_done(adv7511->cec_adap,
- CEC_TX_STATUS_ARB_LOST);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_ARB_LOST);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
@@ -72,12 +74,14 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
if (low_drive_cnt)
status |= CEC_TX_STATUS_LOW_DRIVE;
}
- cec_transmit_done(adv7511->cec_adap, status,
- 0, nack_cnt, low_drive_cnt, err_cnt);
+ drm_connector_hdmi_cec_transmit_done(adv7511->cec_connector, status,
+ 0, nack_cnt, low_drive_cnt,
+ err_cnt);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
- cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_OK);
return;
}
}
@@ -116,7 +120,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), 0);
- cec_received_msg(adv7511->cec_adap, &msg);
+ drm_connector_hdmi_cec_received_msg(adv7511->cec_connector, &msg);
}
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
@@ -179,9 +183,9 @@ int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
return IRQ_HANDLED;
}
-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
if (adv7511->i2c_cec == NULL)
@@ -225,9 +229,9 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
return 0;
}
-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
@@ -293,10 +297,10 @@ static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
return 0;
}
-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
u8 len = msg->len;
unsigned int i;
@@ -328,12 +332,6 @@ static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops adv7511_cec_adap_ops = {
- .adap_enable = adv7511_cec_adap_enable,
- .adap_log_addr = adv7511_cec_adap_log_addr,
- .adap_transmit = adv7511_cec_adap_transmit,
-};
-
static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
{
adv7511->cec_clk = devm_clk_get(dev, "cec");
@@ -348,20 +346,18 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+int adv7511_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+ struct device *dev = &adv7511->i2c_main->dev;
unsigned int offset = adv7511->info->reg_cec_offset;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
goto err_cec_parse_dt;
- adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap)) {
- ret = PTR_ERR(adv7511->cec_adap);
- goto err_cec_alloc;
- }
+ adv7511->cec_connector = connector;
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
@@ -378,17 +374,8 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
ADV7511_REG_CEC_CLK_DIV + offset,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
- ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret)
- goto err_cec_register;
return 0;
-err_cec_register:
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
-err_cec_alloc:
- dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
- ret);
err_cec_parse_dt:
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 1257009e850c..00d6417c177b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -12,14 +12,17 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <media/cec.h>
+#include <sound/pcm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "adv7511.h"
@@ -203,62 +206,37 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb, bool hdmi_mode)
+ bool rgb)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
- uint8_t infoframe[17];
-
- config.hdmi_mode = hdmi_mode;
-
- hdmi_avi_infoframe_init(&config.avi_infoframe);
-
- config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
if (rgb) {
config.csc_enable = false;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ output_format_422 = false;
+ output_format_ycbcr = false;
} else {
config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
if ((connector->display_info.color_formats &
DRM_COLOR_FORMAT_YCBCR422) &&
- config.hdmi_mode) {
+ connector->display_info.is_hdmi) {
config.csc_enable = false;
- config.avi_infoframe.colorspace =
- HDMI_COLORSPACE_YUV422;
- } else {
- config.csc_enable = true;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- }
- }
-
- if (config.hdmi_mode) {
- mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
- switch (config.avi_infoframe.colorspace) {
- case HDMI_COLORSPACE_YUV444:
- output_format_422 = false;
- output_format_ycbcr = true;
- break;
- case HDMI_COLORSPACE_YUV422:
output_format_422 = true;
output_format_ycbcr = true;
- break;
- default:
+ } else {
+ config.csc_enable = true;
output_format_422 = false;
output_format_ycbcr = false;
- break;
}
- } else {
- mode = ADV7511_HDMI_CFG_MODE_DVI;
- output_format_422 = false;
- output_format_ycbcr = false;
}
- adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ if (connector->display_info.is_hdmi)
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+ else
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
adv7511_set_colormap(adv7511, config.csc_enable,
config.csc_coefficents,
@@ -269,15 +247,6 @@ static void adv7511_set_config_csc(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
ADV7511_HDMI_CFG_MODE_MASK, mode);
-
- hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
- sizeof(infoframe));
-
- /* The AVI infoframe id is not configurable */
- regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
- infoframe + 1, sizeof(infoframe) - 1);
-
- adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
}
static void adv7511_set_link_config(struct adv7511 *adv7511,
@@ -446,22 +415,16 @@ static void adv7511_hpd_work(struct work_struct *work)
* restore its state.
*/
if (status == connector_status_connected &&
- adv7511->connector.status == connector_status_disconnected &&
+ adv7511->status == connector_status_disconnected &&
adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
}
- if (adv7511->connector.status != status) {
- adv7511->connector.status = status;
+ if (adv7511->status != status) {
+ adv7511->status = status;
- if (adv7511->connector.dev) {
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv7511->cec_adap);
- drm_kms_helper_hotplug_event(adv7511->connector.dev);
- } else {
- drm_bridge_hpd_notify(&adv7511->bridge, status);
- }
+ drm_bridge_hpd_notify(&adv7511->bridge, status);
}
}
@@ -636,45 +599,11 @@ static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- if (drm_edid) {
- /*
- * FIXME: The CEC physical address should be set using
- * cec_s_phys_addr(adap,
- * connector->display_info.source_physical_address, false) from
- * a path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
-
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
- } else {
- cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
- }
-
return drm_edid;
}
-static int adv7511_get_modes(struct adv7511 *adv7511,
- struct drm_connector *connector)
-{
- const struct drm_edid *drm_edid;
- unsigned int count;
-
- drm_edid = adv7511_edid_read(adv7511, connector);
-
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
-
- drm_edid_free(drm_edid);
-
- return count;
-}
-
static enum drm_connector_status
-adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511)
{
enum drm_connector_status status;
unsigned int val;
@@ -699,8 +628,6 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- if (connector)
- adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -719,17 +646,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
return status;
}
-static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
- const struct drm_display_mode *mode)
-{
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static void adv7511_mode_set(struct adv7511 *adv7511,
- const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
@@ -800,11 +717,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (drm_mode_vrefresh(mode) <= 24)
+ if (drm_mode_vrefresh(adj_mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (drm_mode_vrefresh(mode) <= 25)
+ else if (drm_mode_vrefresh(adj_mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (drm_mode_vrefresh(mode) <= 30)
+ else if (drm_mode_vrefresh(adj_mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
@@ -821,82 +738,30 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
drm_mode_copy(&adv7511->curr_mode, adj_mode);
+ /* Update horizontal/vertical porch params */
+ if (adv7511->info->has_dsi && adv7511->use_timing_gen)
+ adv7533_dsi_config_timing_gen(adv7511);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
*/
- adv7511->f_tmds = mode->clock;
-}
-
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
-{
- return container_of(connector, struct adv7511, connector);
-}
-
-static int adv7511_connector_get_modes(struct drm_connector *connector)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_get_modes(adv, connector);
-}
-
-static enum drm_mode_status
-adv7511_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_mode_valid(adv, mode);
+ adv7511->f_tmds = adj_mode->clock;
}
-static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
- .get_modes = adv7511_connector_get_modes,
- .mode_valid = adv7511_connector_mode_valid,
-};
-
-static enum drm_connector_status
-adv7511_connector_detect(struct drm_connector *connector, bool force)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_detect(adv, connector);
-}
-
-static const struct drm_connector_funcs adv7511_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = adv7511_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int adv7511_connector_init(struct adv7511 *adv)
{
struct drm_bridge *bridge = &adv->bridge;
- int ret;
-
- if (adv->i2c_main->irq)
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
- else
- adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ struct drm_connector *connector;
- ret = drm_connector_init(bridge->dev, &adv->connector,
- &adv7511_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
+ connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ if (IS_ERR(connector)) {
DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
+ return PTR_ERR(connector);
}
- drm_connector_helper_add(&adv->connector,
- &adv7511_connector_helper_funcs);
- drm_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ drm_connector_attach_encoder(connector, bridge->encoder);
return 0;
}
@@ -905,7 +770,7 @@ static int adv7511_connector_init(struct adv7511 *adv)
* DRM Bridge Operations
*/
-static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+static const struct adv7511 *bridge_to_adv7511_const(const struct drm_bridge *bridge)
{
return container_of(bridge, struct adv7511, bridge);
}
@@ -914,8 +779,29 @@ static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
adv7511_power_on(adv);
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ adv7511_set_config_csc(adv, connector, adv->rgb);
+
+ adv7511_mode_set(adv, &crtc_state->adjusted_mode);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
}
static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -926,13 +812,17 @@ static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
adv7511_power_off(adv);
}
-static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
+static enum drm_mode_status
+adv7511_bridge_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ const struct adv7511 *adv = bridge_to_adv7511_const(bridge);
- adv7511_mode_set(adv, mode, adj_mode);
+ if (tmds_rate > 1000ULL * adv->info->max_mode_clock_khz)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
}
static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
@@ -941,10 +831,10 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- if (adv->info->has_dsi)
- return adv7533_mode_valid(adv, mode);
- else
- return adv7511_mode_valid(adv, mode);
+ if (!adv->info->has_dsi)
+ return MODE_OK;
+
+ return adv7533_mode_valid(adv, mode);
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
@@ -974,11 +864,12 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
return ret;
}
-static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+adv7511_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_detect(adv, NULL);
+ return adv7511_detect(adv);
}
static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
@@ -989,28 +880,71 @@ static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge
return adv7511_edid_read(adv, connector);
}
-static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
- enum drm_connector_status status)
+static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv->cec_adap);
+ return 0;
+}
+
+static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ adv7511_bridge_hdmi_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
+
+ return 0;
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
- .hpd_notify = adv7511_bridge_hpd_notify,
.atomic_enable = adv7511_bridge_atomic_enable,
.atomic_disable = adv7511_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+
+ .hdmi_tmds_char_rate_valid = adv7511_bridge_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = adv7511_bridge_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = adv7511_bridge_hdmi_write_infoframe,
+
+ .hdmi_audio_startup = adv7511_hdmi_audio_startup,
+ .hdmi_audio_prepare = adv7511_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = adv7511_hdmi_audio_shutdown,
+
+ .hdmi_cec_init = adv7511_cec_init,
+ .hdmi_cec_enable = adv7511_cec_enable,
+ .hdmi_cec_log_addr = adv7511_cec_log_addr,
+ .hdmi_cec_transmit = adv7511_cec_transmit,
};
/* -----------------------------------------------------------------------------
@@ -1224,9 +1158,10 @@ static int adv7511_probe(struct i2c_client *i2c)
if (!dev->of_node)
return -EINVAL;
- adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
- if (!adv7511)
- return -ENOMEM;
+ adv7511 = devm_drm_bridge_alloc(dev, struct adv7511, bridge,
+ &adv7511_bridge_funcs);
+ if (IS_ERR(adv7511))
+ return PTR_ERR(adv7511);
adv7511->i2c_main = i2c;
adv7511->powered = false;
@@ -1323,22 +1258,43 @@ static int adv7511_probe(struct i2c_client *i2c)
if (adv7511->info->link_config)
adv7511_set_link_config(adv7511, &link_config);
- ret = adv7511_cec_init(dev, adv7511);
- if (ret)
- goto err_unregister_cec;
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
- adv7511->bridge.funcs = &adv7511_bridge_funcs;
- adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI;
if (adv7511->i2c_main->irq)
adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ adv7511->bridge.vendor = "Analog";
+ adv7511->bridge.product = adv7511->info->name;
+
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_AUDIO;
+ adv7511->bridge.hdmi_audio_dev = dev;
+ adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2;
+ adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
+ adv7511->bridge.hdmi_audio_spdif_playback = 1;
+ adv7511->bridge.hdmi_audio_dai_port = 2;
+#endif
+
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ adv7511->bridge.hdmi_cec_dev = dev;
+ adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev);
+ adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS;
+#endif
+
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
- adv7511_audio_init(dev, adv7511);
-
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -1360,10 +1316,7 @@ static int adv7511_probe(struct i2c_client *i2c)
return 0;
err_unregister_audio:
- adv7511_audio_exit(adv7511);
drm_bridge_remove(&adv7511->bridge);
-err_unregister_cec:
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
@@ -1388,9 +1341,6 @@ static void adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
- adv7511_audio_exit(adv7511);
-
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
@@ -1400,6 +1350,8 @@ static void adv7511_remove(struct i2c_client *i2c)
static const struct adv7511_chip_info adv7511_chip_info = {
.type = ADV7511,
+ .name = "ADV7511",
+ .max_mode_clock_khz = 165000,
.supply_names = adv7511_supply_names,
.num_supplies = ARRAY_SIZE(adv7511_supply_names),
.link_config = true,
@@ -1407,6 +1359,7 @@ static const struct adv7511_chip_info adv7511_chip_info = {
static const struct adv7511_chip_info adv7533_chip_info = {
.type = ADV7533,
+ .name = "ADV7533",
.max_mode_clock_khz = 80000,
.max_lane_freq_khz = 800000,
.supply_names = adv7533_supply_names,
@@ -1417,6 +1370,7 @@ static const struct adv7511_chip_info adv7533_chip_info = {
static const struct adv7511_chip_info adv7535_chip_info = {
.type = ADV7535,
+ .name = "ADV7535",
.max_mode_clock_khz = 148500,
.max_lane_freq_khz = 891000,
.supply_names = adv7533_supply_names,
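
One pattern repeats through the rest of this series (anx78xx, analogix DP, anx7625 and the aux bridges below): devm_kzalloc() plus a manual bridge.funcs assignment becomes devm_drm_bridge_alloc(), which allocates the driver structure around its embedded drm_bridge and wires up the funcs in one step. A sketch with illustrative names, assuming the usual kernel context (drm/drm_bridge.h):

    struct foo_bridge {
        struct drm_bridge bridge; /* must be an embedded member */
        /* driver-private state ... */
    };

    static const struct drm_bridge_funcs foo_bridge_funcs = {
        /* ... */
    };

    static int foo_probe(struct device *dev)
    {
        struct foo_bridge *foo;

        foo = devm_drm_bridge_alloc(dev, struct foo_bridge, bridge,
                                    &foo_bridge_funcs);
        if (IS_ERR(foo))
            return PTR_ERR(foo); /* error pointer, not NULL, on failure */

        drm_bridge_add(&foo->bridge);
        return 0;
    }

The error convention changes with it: the helper returns an ERR_PTR() rather than NULL, so converted callers must switch their checks to IS_ERR()/PTR_ERR().
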
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 122ad91e8a32..188c1093a66e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -24,7 +24,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
@@ -67,9 +67,6 @@ void adv7533_dsi_power_on(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
- if (adv->use_timing_gen)
- adv7511_dsi_config_timing_gen(adv);
-
/* set number of dsi lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
@@ -106,10 +103,6 @@ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
struct mipi_dsi_device *dsi = adv->dsi;
u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- /* Check max clock for either 7533 or 7535 */
- if (mode->clock > adv->info->max_mode_clock_khz)
- return MODE_CLOCK_HIGH;
-
/* Check max clock for each lane */
if (mode->clock * bpp > adv->info->max_lane_freq_khz * adv->num_dsi_lanes)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index a83020d6576f..ba0fc149a9e7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1193,9 +1193,10 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
bool found = false;
int err;
- anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
- if (!anx78xx)
- return -ENOMEM;
+ anx78xx = devm_drm_bridge_alloc(&client->dev, struct anx78xx, bridge,
+ &anx78xx_bridge_funcs);
+ if (IS_ERR(anx78xx))
+ return PTR_ERR(anx78xx);
pdata = &anx78xx->pdata;
@@ -1306,8 +1307,6 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
goto err_poweroff;
}
- anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
-
drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
index b1e482994ffe..e8662168717d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -5,6 +5,8 @@
* Based on anx7808 driver obtained from chromeos with copyright:
* Copyright(c) 2013, Google Inc.
*/
+
+#include <linux/export.h>
#include <linux/regmap.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 505eec6b819b..ed35e567d117 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -1040,7 +1041,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1124,7 +1125,7 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1179,7 +1180,7 @@ out_dp_init:
static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
@@ -1216,7 +1217,7 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1239,7 +1240,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -1277,7 +1278,7 @@ out:
static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
@@ -1299,7 +1300,7 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
- struct analogix_dp_device *dp = bridge->driver_private;
+ struct analogix_dp_device *dp = to_dp(bridge);
struct drm_display_info *display_info = &dp->connector.display_info;
struct video_info *video = &dp->video_info;
struct device_node *dp_node = dp->dev->of_node;
@@ -1384,25 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
.attach = analogix_dp_bridge_attach,
};
-static int analogix_dp_create_bridge(struct drm_device *drm_dev,
- struct analogix_dp_device *dp)
-{
- struct drm_bridge *bridge;
-
- bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("failed to allocate for drm bridge\n");
- return -ENOMEM;
- }
-
- dp->bridge = bridge;
-
- bridge->driver_private = dp;
- bridge->funcs = &analogix_dp_bridge_funcs;
-
- return drm_bridge_attach(dp->encoder, bridge, NULL, 0);
-}
-
static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
{
struct device_node *dp_node = dp->dev->of_node;
@@ -1490,7 +1472,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_PTR(-EINVAL);
}
- dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
+ dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
+ &analogix_dp_bridge_funcs);
if (!dp)
return ERR_PTR(-ENOMEM);
@@ -1642,7 +1625,7 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
return ret;
}
- ret = analogix_dp_create_bridge(drm_dev, dp);
+ ret = drm_bridge_attach(dp->encoder, &dp->bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to create bridge (%d)\n", ret);
goto err_unregister_aux;
@@ -1659,7 +1642,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind);
void analogix_dp_unbind(struct analogix_dp_device *dp)
{
- analogix_dp_bridge_disable(dp->bridge);
+ analogix_dp_bridge_disable(&dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
drm_panel_unprepare(dp->plat_data->panel);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 2b54120ba4a3..b86e93f30ed6 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -11,6 +11,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_bridge.h>
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
@@ -154,7 +155,7 @@ struct analogix_dp_device {
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
struct drm_dp_aux aux;
struct clk *clock;
unsigned int irq;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8a9079c2ed5c..c0ad8f59e483 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2448,7 +2448,7 @@ anx7625_audio_update_connector_status(struct anx7625_data *ctx,
enum drm_connector_status status);
static enum drm_connector_status
-anx7625_bridge_detect(struct drm_bridge *bridge)
+anx7625_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2596,7 +2596,6 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
return ret;
}
- platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2630,10 +2629,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
return -ENODEV;
}
- platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
- if (!platform) {
+ platform = devm_drm_bridge_alloc(dev, struct anx7625_data, bridge, &anx7625_bridge_funcs);
+ if (IS_ERR(platform)) {
DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
- return -ENOMEM;
+ return PTR_ERR(platform);
}
pdata = &platform->pdata;
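The second recurring change is the .detect prototype, which now also receives the drm_connector being probed. A sketch of the new shape, reusing the hypothetical struct my_bridge from the earlier sketch; my_hpd_asserted() is a stand-in for whatever hot-plug query a real driver performs:

static bool my_hpd_asserted(struct my_bridge *mb)
{
        return true;    /* stand-in for a real HPD register read */
}

static enum drm_connector_status
my_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
        struct my_bridge *mb = to_my_bridge(bridge);

        /* Most drivers in this series ignore the connector for now. */
        return my_hpd_asserted(mb) ? connector_status_connected
                                   : connector_status_disconnected;
}

Callers of drm_bridge_detect() likewise gain a connector argument, as the ch7033, lt8912b, and simple-bridge hunks below show.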
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index c179b86d208f..b63304d3a80f 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -109,9 +110,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev, struct drm_aux_bridge_data,
+ bridge, &drm_aux_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
@@ -119,7 +121,6 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
"failed to acquire drm_bridge\n");
- data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
/* passthrough data, allow everything */
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index af6f79793407..2e9c702c7087 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -172,12 +173,13 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_hpd_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev,
+ struct drm_aux_hpd_bridge_data, bridge,
+ &drm_aux_hpd_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
- data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
data->bridge.of_node = dev_get_platdata(data->dev);
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index b022dd6e6b6e..a57ca8c3bdae 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -670,13 +670,28 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
u32 val;
+ /*
+ * The cdns-dsi controller must be disabled after its DPI source has
+ * stopped streaming. Otherwise there is a brief window, after the
+ * cdns-dsi controller has been disabled but before the DPI source is
+ * disabled, where the DPI stream is still on but the cdns-dsi
+ * controller is no longer ready to accept the incoming signals. This
+ * is one of the reasons a shift in pixel colors is observed on
+ * displays that have cdns-dsi as one of the bridges.
+ *
+ * To mitigate this, disable this bridge from the bridge post_disable()
+ * hook instead of the bridge _disable() hook. The post_disable() hook
+ * is called after the CRTC disable, which is where DPI sources
+ * typically stop their streams.
+ */
+
val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
DISP_EOT_GEN);
@@ -688,15 +703,6 @@ static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
if (dsi->platform_ops && dsi->platform_ops->disable)
dsi->platform_ops->disable(dsi);
- pm_runtime_put(dsi->base.dev);
-}
-
-static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
-
dsi->phy_initialized = false;
dsi->link_initialized = false;
phy_power_off(dsi->dphy);
@@ -774,8 +780,8 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -792,6 +798,21 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
u32 tmp, reg_wakeup, div, status;
int nlanes;
+ /*
+ * The cdns-dsi controller must be enabled before its DPI source
+ * begins streaming. Otherwise there is a brief window, after the DPI
+ * source is enabled but before the cdns-dsi controller is enabled,
+ * where the DPI stream is on but the cdns-dsi controller is not yet
+ * ready to accept the incoming signals. This is one of the reasons a
+ * shift in pixel colors is observed on displays that have cdns-dsi
+ * as one of the bridges.
+ *
+ * To mitigate this, enable this bridge from the bridge pre_enable()
+ * hook instead of the bridge _enable() hook. The pre_enable() hook is
+ * called before the CRTC enable, which is where DPI sources typically
+ * start their streams.
+ */
+
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
@@ -811,8 +832,8 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
/*
* Now that the DSI Link and DSI Phy are initialized,
@@ -941,19 +962,6 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
-
- if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
- return;
-
- cdns_dsi_init_link(dsi);
- cdns_dsi_hs_init(dsi);
-}
-
static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -1048,9 +1056,7 @@ cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .atomic_disable = cdns_dsi_bridge_atomic_disable,
.atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
- .atomic_enable = cdns_dsi_bridge_atomic_enable,
.atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
.atomic_check = cdns_dsi_bridge_atomic_check,
.atomic_reset = cdns_dsi_bridge_atomic_reset,
@@ -1289,9 +1295,10 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
int ret, irq;
u32 val;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct cdns_dsi, input.bridge,
+ &cdns_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
@@ -1349,7 +1356,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
* CDNS_DPI_INPUT.
*/
input->id = CDNS_DPI_INPUT;
- input->bridge.funcs = &cdns_dsi_bridge_funcs;
input->bridge.of_node = pdev->dev.of_node;
/* Mask all interrupts before registering the IRQ handler. */
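The cdns-dsi move from enable/disable to pre_enable/post_disable relies on the ordering the atomic helpers guarantee: pre_enable hooks run before the CRTC is enabled, and post_disable hooks run after it is disabled. A sketch of where each hook lands relative to the DPI source, with hypothetical names:

/* Sketch only: hook placement relative to the DPI (CRTC) source. */
static void my_dsi_atomic_pre_enable(struct drm_bridge *bridge,
                                     struct drm_atomic_state *state)
{
        /* CRTC not yet enabled: the DPI source is not streaming. */
}

static void my_dsi_atomic_post_disable(struct drm_bridge *bridge,
                                       struct drm_atomic_state *state)
{
        /* CRTC already disabled: the DPI source has stopped. */
}

static const struct drm_bridge_funcs my_dsi_bridge_funcs = {
        .atomic_pre_enable = my_dsi_atomic_pre_enable,
        .atomic_post_disable = my_dsi_atomic_post_disable,
};

Folding the old enable/disable bodies into these two hooks is also why the pm_runtime put and the link/HS initialization move with them in the hunks above.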
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index b431e7efd1f0..a614d1384f71 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2143,7 +2143,8 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2389,9 +2390,10 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
int ret;
int irq;
- mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
- if (!mhdp)
- return -ENOMEM;
+ mhdp = devm_drm_bridge_alloc(dev, struct cdns_mhdp_device, bridge,
+ &cdns_mhdp_bridge_funcs);
+ if (IS_ERR(mhdp))
+ return PTR_ERR(mhdp);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -2481,7 +2483,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->display_fmt.bpc = 8;
mhdp->bridge.of_node = pdev->dev.of_node;
- mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 634c5b030667..814713c5bea9 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -691,9 +691,10 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
struct chipone *icn;
int ret;
- icn = devm_kzalloc(dev, sizeof(struct chipone), GFP_KERNEL);
- if (!icn)
- return -ENOMEM;
+ icn = devm_drm_bridge_alloc(dev, struct chipone, bridge,
+ &chipone_bridge_funcs);
+ if (IS_ERR(icn))
+ return PTR_ERR(icn);
icn->dev = dev;
@@ -701,7 +702,6 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
if (ret)
return ret;
- icn->bridge.funcs = &chipone_bridge_funcs;
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index 210c45c1efd4..54d49d4882c8 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -215,7 +215,7 @@ static enum drm_connector_status ch7033_connector_detect(
{
struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
- return drm_bridge_detect(priv->next_bridge);
+ return drm_bridge_detect(priv->next_bridge, connector);
}
static const struct drm_connector_funcs ch7033_connector_funcs = {
@@ -536,9 +536,10 @@ static int ch7033_probe(struct i2c_client *client)
unsigned int val;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct ch7033_priv, bridge,
+ &ch7033_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -575,7 +576,6 @@ static int ch7033_probe(struct i2c_client *client)
}
INIT_LIST_HEAD(&priv->bridge.list);
- priv->bridge.funcs = &ch7033_bridge_funcs;
priv->bridge.of_node = dev->of_node;
drm_bridge_add(&priv->bridge);
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index c8abd9920fee..a35dae9b56e2 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -103,9 +103,10 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
u8 buffer[4];
int ret;
- anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
- if (!anx7688)
- return -ENOMEM;
+ anx7688 = devm_drm_bridge_alloc(dev, struct cros_ec_anx7688, bridge,
+ &cros_ec_anx7688_bridge_funcs);
+ if (IS_ERR(anx7688))
+ return PTR_ERR(anx7688);
anx7688->client = client;
i2c_set_clientdata(client, anx7688);
@@ -153,7 +154,6 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
fw_version);
- anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
drm_bridge_add(&anx7688->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index badd2c7f91a1..52b7b5889e6f 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -40,8 +40,7 @@ static int display_connector_attach(struct drm_bridge *bridge,
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}
-static enum drm_connector_status
-display_connector_detect(struct drm_bridge *bridge)
+static enum drm_connector_status display_connector_detect(struct drm_bridge *bridge)
{
struct display_connector *conn = to_display_connector(bridge);
@@ -82,6 +81,12 @@ display_connector_detect(struct drm_bridge *bridge)
}
}
+static enum drm_connector_status
+display_connector_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return display_connector_detect(bridge);
+}
+
static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -172,7 +177,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
- .detect = display_connector_detect,
+ .detect = display_connector_bridge_detect,
.edid_read = display_connector_edid_read,
.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 2cb6dfc7a6d3..5c3cf37200bc 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -298,16 +298,15 @@ static int fsl_ldb_probe(struct platform_device *pdev)
struct fsl_ldb *fsl_ldb;
int dual_link;
- fsl_ldb = devm_kzalloc(dev, sizeof(*fsl_ldb), GFP_KERNEL);
- if (!fsl_ldb)
- return -ENOMEM;
+ fsl_ldb = devm_drm_bridge_alloc(dev, struct fsl_ldb, bridge, &funcs);
+ if (IS_ERR(fsl_ldb))
+ return PTR_ERR(fsl_ldb);
fsl_ldb->devdata = of_device_get_match_data(dev);
if (!fsl_ldb->devdata)
return -EINVAL;
fsl_ldb->dev = &pdev->dev;
- fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
fsl_ldb->clk = devm_clk_get(dev, "ldb");
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index f072c6ed39ef..0e31d5000e7c 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -5,6 +5,8 @@
* bridge driver for legacy DT bindings, utilizing display-timings node
*/
+#include <linux/export.h>
+
#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
@@ -59,9 +61,10 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
struct imx_legacy_bridge *imx_bridge;
int ret;
- imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL);
- if (!imx_bridge)
- return ERR_PTR(-ENOMEM);
+ imx_bridge = devm_drm_bridge_alloc(dev, struct imx_legacy_bridge,
+ base, &imx_legacy_bridge_funcs);
+ if (IS_ERR(imx_bridge))
+ return ERR_CAST(imx_bridge);
ret = of_get_drm_display_mode(np,
&imx_bridge->mode,
@@ -72,7 +75,6 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER;
- imx_bridge->base.funcs = &imx_legacy_bridge_funcs;
imx_bridge->base.of_node = np;
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
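When the enclosing function itself returns a pointer of another type, the ERR_PTR() from devm_drm_bridge_alloc() is forwarded with ERR_CAST() instead of being unwrapped, as the hunk above does. A sketch under the same hypothetical naming, with the bridge member called base as in the imx legacy bridge:

static const struct drm_bridge_funcs my_legacy_bridge_funcs = { };

struct my_legacy_bridge {
        struct drm_display_mode mode;
        struct drm_bridge base;
};

static struct drm_bridge *my_legacy_bridge_create(struct device *dev)
{
        struct my_legacy_bridge *b;

        b = devm_drm_bridge_alloc(dev, struct my_legacy_bridge, base,
                                  &my_legacy_bridge_funcs);
        if (IS_ERR(b))
                return ERR_CAST(b);     /* same errno, retyped pointer */

        return &b->base;
}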
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 8a4fd7d77a8d..3a6f8587a257 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -140,9 +140,10 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
struct device_node *remote;
struct imx8mp_hdmi_pvi *pvi;
- pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
- if (!pvi)
- return -ENOMEM;
+ pvi = devm_drm_bridge_alloc(&pdev->dev, struct imx8mp_hdmi_pvi,
+ bridge, &imx_hdmi_pvi_bridge_funcs);
+ if (IS_ERR(pvi))
+ return PTR_ERR(pvi);
platform_set_drvdata(pdev, pvi);
pvi->dev = &pdev->dev;
@@ -166,7 +167,6 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* Register the bridge. */
- pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
pvi->bridge.of_node = pdev->dev.of_node;
pvi->bridge.timings = pvi->next_bridge->timings;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1f6fd488e703..8517b1c953d4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -63,12 +63,11 @@ struct imx8qxp_pc_channel {
struct drm_bridge *next_bridge;
struct imx8qxp_pc *pc;
unsigned int stream_id;
- bool is_available;
};
struct imx8qxp_pc {
struct device *dev;
- struct imx8qxp_pc_channel ch[2];
+ struct imx8qxp_pc_channel *ch[2];
struct clk *clk_apb;
void __iomem *base;
};
@@ -307,7 +306,14 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
goto free_child;
}
- ch = &pc->ch[i];
+ ch = devm_drm_bridge_alloc(dev, struct imx8qxp_pc_channel, bridge,
+ &imx8qxp_pc_bridge_funcs);
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ goto free_child;
+ }
+
+ pc->ch[i] = ch;
ch->pc = pc;
ch->stream_id = i;
@@ -333,9 +339,7 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
of_node_put(remote);
ch->bridge.driver_private = ch;
- ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
ch->bridge.of_node = child;
- ch->is_available = true;
drm_bridge_add(&ch->bridge);
}
@@ -345,8 +349,8 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
free_child:
of_node_put(child);
- if (i == 1 && pc->ch[0].next_bridge)
- drm_bridge_remove(&pc->ch[0].bridge);
+ if (i == 1 && pc->ch[0]->next_bridge)
+ drm_bridge_remove(&pc->ch[0]->bridge);
pm_runtime_disable(dev);
return ret;
@@ -359,13 +363,10 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
int i;
for (i = 0; i < 2; i++) {
- ch = &pc->ch[i];
-
- if (!ch->is_available)
- continue;
+ ch = pc->ch[i];
- drm_bridge_remove(&ch->bridge);
- ch->is_available = false;
+ if (ch)
+ drm_bridge_remove(&ch->bridge);
}
pm_runtime_disable(&pdev->dev);
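Because every bridge now carries its own refcounted allocation, a device exposing two bridges can no longer embed them in an array; the pixel combiner instead allocates each channel separately, and a NULL slot replaces the old is_available flag. A rough sketch of the resulting shape, names hypothetical:

struct my_pc_channel {
        struct drm_bridge bridge;
        unsigned int stream_id;
};

struct my_pc {
        struct my_pc_channel *ch[2];    /* separately allocated */
};

static int my_pc_add_channels(struct device *dev, struct my_pc *pc,
                              const struct drm_bridge_funcs *funcs)
{
        unsigned int i;

        for (i = 0; i < 2; i++) {
                struct my_pc_channel *ch;

                ch = devm_drm_bridge_alloc(dev, struct my_pc_channel,
                                           bridge, funcs);
                if (IS_ERR(ch))
                        return PTR_ERR(ch);     /* unfilled slots stay NULL */

                ch->stream_id = i;
                pc->ch[i] = ch;
                drm_bridge_add(&ch->bridge);
        }

        return 0;
}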
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index e092c9ea99b0..e5943506981d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -327,9 +327,10 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL);
- if (!pl)
- return -ENOMEM;
+ pl = devm_drm_bridge_alloc(dev, struct imx8qxp_pixel_link, bridge,
+ &imx8qxp_pixel_link_bridge_funcs);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
ret = imx_scu_get_handle(&pl->ipc_handle);
if (ret) {
@@ -384,7 +385,6 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pl);
pl->bridge.driver_private = pl;
- pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs;
pl->bridge.of_node = np;
drm_bridge_add(&pl->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index da138ab51b3b..111310acab2c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -392,9 +392,10 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
- if (!p2d)
- return -ENOMEM;
+ p2d = devm_drm_bridge_alloc(dev, struct imx8qxp_pxl2dpi, bridge,
+ &imx8qxp_pxl2dpi_bridge_funcs);
+ if (IS_ERR(p2d))
+ return PTR_ERR(p2d);
p2d->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(p2d->regmap)) {
@@ -441,7 +442,6 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
p2d->bridge.driver_private = p2d;
- p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
p2d->bridge.of_node = np;
drm_bridge_add(&p2d->bridge);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index a3a63a977b0a..cf813672b4ff 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -693,7 +693,8 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it6263_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -816,9 +817,10 @@ static int it6263_probe(struct i2c_client *client)
struct it6263 *it;
int ret;
- it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
- if (!it)
- return -ENOMEM;
+ it = devm_drm_bridge_alloc(dev, struct it6263, bridge,
+ &it6263_bridge_funcs);
+ if (IS_ERR(it))
+ return PTR_ERR(it);
it->dev = dev;
it->hdmi_i2c = client;
@@ -866,7 +868,6 @@ static int it6263_probe(struct i2c_client *client)
i2c_set_clientdata(client, it);
- it->bridge.funcs = &it6263_bridge_funcs;
it->bridge.of_node = dev->of_node;
/* IT6263 chip doesn't support HPD interrupt. */
it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 1383d1e21afe..89649c17ffad 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3238,7 +3238,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-it6505_bridge_detect(struct drm_bridge *bridge)
+it6505_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3583,9 +3583,10 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct extcon_dev *extcon;
int err;
- it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
- if (!it6505)
- return -ENOMEM;
+ it6505 = devm_drm_bridge_alloc(&client->dev, struct it6505, bridge,
+ &it6505_bridge_funcs);
+ if (IS_ERR(it6505))
+ return PTR_ERR(it6505);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
@@ -3660,7 +3661,6 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505->aux.transfer = it6505_aux_transfer;
drm_dp_aux_init(&it6505->aux);
- it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 7b110ae53291..aa7b1dcc5d70 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -843,7 +843,8 @@ static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+it66121_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -1516,9 +1517,10 @@ static int it66121_probe(struct i2c_client *client)
return -ENXIO;
}
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct it66121_ctx, bridge,
+ &it66121_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!ep)
@@ -1577,7 +1579,6 @@ static int it66121_probe(struct i2c_client *client)
return -ENODEV;
}
- ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 3e49d855b364..342374cb8fc6 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -408,7 +408,7 @@ lt8912_connector_detect(struct drm_connector *connector, bool force)
struct lt8912 *lt = connector_to_lt8912(connector);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -607,12 +607,12 @@ lt8912_bridge_mode_valid(struct drm_bridge *bridge,
}
static enum drm_connector_status
-lt8912_bridge_detect(struct drm_bridge *bridge)
+lt8912_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
- return drm_bridge_detect(lt->hdmi_port);
+ return drm_bridge_detect(lt->hdmi_port, connector);
return lt8912_check_cable_status(lt);
}
@@ -761,9 +761,10 @@ static int lt8912_probe(struct i2c_client *client)
int ret = 0;
struct device *dev = &client->dev;
- lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL);
- if (!lt)
- return -ENOMEM;
+ lt = devm_drm_bridge_alloc(dev, struct lt8912, bridge,
+ &lt8912_bridge_funcs);
+ if (IS_ERR(lt))
+ return PTR_ERR(lt);
lt->dev = dev;
lt->i2c_client[0] = client;
@@ -778,7 +779,6 @@ static int lt8912_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt);
- lt->bridge.funcs = &lt8912_bridge_funcs;
lt->bridge.of_node = dev->of_node;
lt->bridge.ops = (DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_DETECT);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9b2dac9bd63c..399fa7eebd49 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -727,9 +727,9 @@ static int lt9211_probe(struct i2c_client *client)
struct lt9211 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct lt9211, bridge, &lt9211_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -755,7 +755,6 @@ static int lt9211_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &lt9211_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index a35a8b8ca89c..a2d032ee4744 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -543,7 +543,8 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611)
return 0;
}
-static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
@@ -936,8 +937,8 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
return MODE_OK;
}
-static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int lt9611_hdmi_audio_startup(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -952,8 +953,8 @@ static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
return 0;
}
-static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int lt9611_hdmi_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -974,8 +975,8 @@ static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
&hparms->cea);
}
-static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void lt9611_hdmi_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@@ -1072,9 +1073,10 @@ static int lt9611_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
- if (!lt9611)
- return -ENOMEM;
+ lt9611 = devm_drm_bridge_alloc(dev, struct lt9611, bridge,
+ &lt9611_bridge_funcs);
+ if (IS_ERR(lt9611))
+ return PTR_ERR(lt9611);
lt9611->dev = dev;
lt9611->client = client;
@@ -1127,7 +1129,6 @@ static int lt9611_probe(struct i2c_client *client)
/* Disable Audio InfoFrame, enabled by default */
regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
- lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 766da2cb45a7..38fb8776c0f4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -353,7 +353,8 @@ static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge,
lt9611uxc_unlock(lt9611uxc);
}
-static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+lt9611uxc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 1646e454e0b0..e6a7147e141b 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -118,9 +118,10 @@ static int lvds_codec_probe(struct platform_device *pdev)
u32 val;
int ret;
- lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
- if (!lvds_codec)
- return -ENOMEM;
+ lvds_codec = devm_drm_bridge_alloc(dev, struct lvds_codec, bridge,
+ &funcs);
+ if (IS_ERR(lvds_codec))
+ return PTR_ERR(lvds_codec);
lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
@@ -156,8 +157,6 @@ static int lvds_codec_probe(struct platform_device *pdev)
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
- lvds_codec->bridge.funcs = &funcs;
-
/*
* Decoder input LVDS format is a property of the decoder chip or even
* its strapping. Handle data-mapping the same way lvds-panel does. In
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 15a5a1f644fc..c9e6505cbd88 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -120,7 +120,8 @@ drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.get_modes = ge_b850v3_lvds_get_modes,
};
-static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct i2c_client *stdp4028_i2c =
ge_b850v3_lvds_ptr->stdp4028_i2c;
@@ -141,7 +142,7 @@ static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge
static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector,
bool force)
{
- return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge);
+ return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge, connector);
}
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
@@ -225,13 +226,11 @@ static int ge_b850v3_lvds_init(struct device *dev)
if (ge_b850v3_lvds_ptr)
goto success;
- ge_b850v3_lvds_ptr = devm_kzalloc(dev,
- sizeof(*ge_b850v3_lvds_ptr),
- GFP_KERNEL);
-
- if (!ge_b850v3_lvds_ptr) {
+ ge_b850v3_lvds_ptr = devm_drm_bridge_alloc(dev, struct ge_b850v3_lvds, bridge,
+ &ge_b850v3_lvds_funcs);
+ if (IS_ERR(ge_b850v3_lvds_ptr)) {
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
- return -ENOMEM;
+ return PTR_ERR(ge_b850v3_lvds_ptr);
}
success:
@@ -264,7 +263,6 @@ static int ge_b850v3_register(void)
struct device *dev = &stdp4028_i2c->dev;
/* drm bridge initialization */
- ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 1d4ae0097df8..9f4ff82bc6b4 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -157,9 +157,10 @@ static int mchp_lvds_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct mchp_lvds, bridge,
+ &mchp_lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -192,7 +193,6 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->bridge.of_node = dev->of_node;
lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
dev_set_drvdata(dev, lvds);
ret = devm_pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 55912ae11f46..2f7429b24fc2 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1149,9 +1149,10 @@ static int nwl_dsi_probe(struct platform_device *pdev)
struct nwl_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct nwl_dsi, bridge,
+ &nwl_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
@@ -1180,7 +1181,6 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->quirks = (uintptr_t)attr->data;
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 25d7c415478b..7acb11f16dc1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -261,10 +261,10 @@ static int ptn3460_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
- if (!ptn_bridge) {
- return -ENOMEM;
- }
+ ptn_bridge = devm_drm_bridge_alloc(dev, struct ptn3460_bridge, bridge,
+ &ptn3460_bridge_funcs);
+ if (IS_ERR(ptn_bridge))
+ return PTR_ERR(ptn_bridge);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -300,7 +300,6 @@ static int ptn3460_probe(struct i2c_client *client)
return ret;
}
- ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ptn_bridge->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 29b0358a7b6d..184a8b7049a7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -287,15 +288,14 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
if (!panel)
return ERR_PTR(-EINVAL);
- panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
- GFP_KERNEL);
- if (!panel_bridge)
- return ERR_PTR(-ENOMEM);
+ panel_bridge = devm_drm_bridge_alloc(panel->dev, struct panel_bridge, bridge,
+ &panel_bridge_bridge_funcs);
+ if (IS_ERR(panel_bridge))
+ return (void *)panel_bridge;
panel_bridge->connector_type = connector_type;
panel_bridge->panel = panel;
- panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs;
panel_bridge->bridge.of_node = panel->dev->of_node;
panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
panel_bridge->bridge.type = connector_type;
@@ -328,7 +328,8 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
- devm_kfree(panel_bridge->panel->dev, bridge);
+ /* TODO remove this after reworking panel_bridge lifetime */
+ devm_drm_put_bridge(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
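panel.c is the one place in this series where a bridge is torn down ahead of its owning device, so the devm allocation is paired with an explicit devm_drm_put_bridge(); the TODO in the hunk marks this as interim until the panel_bridge lifetime rework. The teardown, reduced to a sketch with hypothetical names:

struct my_panel_bridge {
        struct drm_panel *panel;
        struct drm_bridge bridge;
};

#define to_my_panel_bridge(b) \
        container_of(b, struct my_panel_bridge, bridge)

static void my_panel_bridge_remove(struct drm_bridge *bridge)
{
        struct my_panel_bridge *pb = to_my_panel_bridge(bridge);

        drm_bridge_remove(bridge);
        /* Explicit put: the panel device outlives this bridge. */
        devm_drm_put_bridge(pb->panel->dev, bridge);
}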
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 8726fefc5c65..f879a1df077d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -449,9 +449,10 @@ static int ps8622_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
- if (!ps8622)
- return -ENOMEM;
+ ps8622 = devm_drm_bridge_alloc(dev, struct ps8622_bridge, bridge,
+ &ps8622_bridge_funcs);
+ if (IS_ERR(ps8622))
+ return PTR_ERR(ps8622);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -509,7 +510,6 @@ static int ps8622_probe(struct i2c_client *client)
ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
}
- ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ps8622->bridge.of_node = dev->of_node;
drm_bridge_add(&ps8622->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 2422ff68c104..825777a5758f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -636,9 +636,10 @@ static int ps8640_probe(struct i2c_client *client)
int ret;
u32 i;
- ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
- if (!ps_bridge)
- return -ENOMEM;
+ ps_bridge = devm_drm_bridge_alloc(dev, struct ps8640, bridge,
+ &ps8640_bridge_funcs);
+ if (IS_ERR(ps_bridge))
+ return PTR_ERR(ps_bridge);
mutex_init(&ps_bridge->aux_lock);
@@ -662,7 +663,6 @@ static int ps8640_probe(struct i2c_client *client)
if (IS_ERR(ps_bridge->gpio_reset))
return PTR_ERR(ps_bridge->gpio_reset);
- ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index bccc88d25948..b5dd71f6a990 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -14,11 +14,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <video/mipi_display.h>
@@ -557,10 +559,6 @@ static void samsung_dsim_reset(struct samsung_dsim *dsi)
samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
}
-#ifndef MHZ
-#define MHZ (1000 * 1000)
-#endif
-
static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
unsigned long fin,
unsigned long fout,
@@ -574,8 +572,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
u16 _m, best_m;
u8 _s, best_s;
- p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ));
- p_max = fin / (driver_data->pll_fin_min * MHZ);
+ p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * HZ_PER_MHZ));
+ p_max = fin / (driver_data->pll_fin_min * HZ_PER_MHZ);
for (_p = p_min; _p <= p_max; ++_p) {
for (_s = 0; _s <= 5; ++_s) {
@@ -590,8 +588,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
tmp = (u64)_m * fin;
do_div(tmp, _p);
- if (tmp < driver_data->min_freq * MHZ ||
- tmp > driver_data->max_freq * MHZ)
+ if (tmp < driver_data->min_freq * HZ_PER_MHZ ||
+ tmp > driver_data->max_freq * HZ_PER_MHZ)
continue;
tmp = (u64)_m * fin;
@@ -634,7 +632,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
* limit.
*/
fin = clk_get_rate(clk_get_parent(dsi->pll_clk));
- while (fin > driver_data->pll_fin_max * MHZ)
+ while (fin > driver_data->pll_fin_max * HZ_PER_MHZ)
fin /= 2;
clk_set_rate(dsi->pll_clk, fin);
@@ -660,10 +658,11 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ 100 * HZ_PER_MHZ, 120 * HZ_PER_MHZ, 160 * HZ_PER_MHZ,
+ 200 * HZ_PER_MHZ, 270 * HZ_PER_MHZ, 320 * HZ_PER_MHZ,
+ 390 * HZ_PER_MHZ, 450 * HZ_PER_MHZ, 510 * HZ_PER_MHZ,
+ 560 * HZ_PER_MHZ, 640 * HZ_PER_MHZ, 690 * HZ_PER_MHZ,
+ 770 * HZ_PER_MHZ, 870 * HZ_PER_MHZ, 950 * HZ_PER_MHZ,
};
int band;
@@ -723,7 +722,7 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
esc_clk = byte_clk / esc_div;
- if (esc_clk > 20 * MHZ) {
+ if (esc_clk > 20 * HZ_PER_MHZ) {
++esc_div;
esc_clk = byte_clk / esc_div;
}
@@ -898,8 +897,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* The user manual describes that following bits are ignored in
* command mode.
*/
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -1235,43 +1232,34 @@ static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
{
unsigned long flags;
struct samsung_dsim_transfer *xfer;
- bool start = false;
-again:
spin_lock_irqsave(&dsi->transfer_lock, flags);
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
+ while (!list_empty(&dsi->transfer_list)) {
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
- xfer = list_first_entry(&dsi->transfer_list,
- struct samsung_dsim_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
- samsung_dsim_send_to_fifo(dsi, xfer);
+ samsung_dsim_send_to_fifo(dsi, xfer);
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
- xfer->result = 0;
- complete(&xfer->completed);
+ xfer->result = 0;
+ complete(&xfer->completed);
- spin_lock_irqsave(&dsi->transfer_lock, flags);
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
+ list_del_init(&xfer->list);
+ }
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
}
static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
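Two independent cleanups land in samsung-dsim above: the private MHZ macro gives way to HZ_PER_MHZ from <linux/units.h>, and the goto-based transfer pump becomes a single while loop that holds the spinlock only around list manipulation. The lock choreography of that loop, reduced to a sketch with hypothetical names and stub hardware helpers:

struct my_xfer {
        struct list_head list;
        struct completion completed;
};

struct my_dsi {
        spinlock_t transfer_lock;
        struct list_head transfer_list;
};

static bool my_xfer_is_pending(struct my_xfer *xfer)
{
        return false;   /* stand-in: "still waiting for TX/RX" test */
}

static void my_send_to_fifo(struct my_dsi *dsi, struct my_xfer *xfer)
{
        /* stand-in for the actual FIFO writes */
}

static void my_transfer_start(struct my_dsi *dsi)
{
        unsigned long flags;

        spin_lock_irqsave(&dsi->transfer_lock, flags);

        while (!list_empty(&dsi->transfer_list)) {
                struct my_xfer *xfer =
                        list_first_entry(&dsi->transfer_list,
                                         struct my_xfer, list);

                /* Drop the lock while the hardware is touched. */
                spin_unlock_irqrestore(&dsi->transfer_lock, flags);

                if (my_xfer_is_pending(xfer))
                        return; /* an IRQ re-enters the loop later */

                my_send_to_fifo(dsi, xfer);
                complete(&xfer->completed);

                /* Retake the lock to unlink and test the next entry. */
                spin_lock_irqsave(&dsi->transfer_lock, flags);
                list_del_init(&xfer->list);
        }

        spin_unlock_irqrestore(&dsi->transfer_lock, flags);
}

The early return while a transfer is still in flight matches the original logic; the completion interrupt restarts the pump once TX/RX finishes.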
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 6de61d9fe064..d537b1d036fb 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -458,7 +458,8 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+sii902x_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -1135,7 +1136,6 @@ static int sii902x_init(struct sii902x *sii902x)
if (ret)
goto err_unreg_audio;
- sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
@@ -1170,9 +1170,9 @@ static int sii902x_probe(struct i2c_client *client)
return -EIO;
}
- sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
- if (!sii902x)
- return -ENOMEM;
+ sii902x = devm_drm_bridge_alloc(dev, struct sii902x, bridge, &sii902x_bridge_funcs);
+ if (IS_ERR(sii902x))
+ return PTR_ERR(sii902x);
sii902x->i2c = client;
sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index cd7837c9a6e0..bb1bed03eb5b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -888,9 +888,10 @@ static int sii9234_probe(struct i2c_client *client)
struct device *dev = &client->dev;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii9234, bridge,
+ &sii9234_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -921,7 +922,6 @@ static int sii9234_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii9234_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 3af650dc92a1..9e48ad39e1cc 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2291,9 +2291,10 @@ static int sii8620_probe(struct i2c_client *client)
struct sii8620 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii8620, bridge,
+ &sii8620_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -2336,7 +2337,6 @@ static int sii8620_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii8620_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 70db5b99e5bb..3d15ddd39470 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -90,7 +90,7 @@ simple_bridge_connector_detect(struct drm_connector *connector, bool force)
{
struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
- return drm_bridge_detect(sbridge->next_bridge);
+ return drm_bridge_detect(sbridge->next_bridge, connector);
}
static const struct drm_connector_funcs simple_bridge_con_funcs = {
@@ -168,9 +168,10 @@ static int simple_bridge_probe(struct platform_device *pdev)
struct simple_bridge *sbridge;
struct device_node *remote;
- sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
- if (!sbridge)
- return -ENOMEM;
+ sbridge = devm_drm_bridge_alloc(&pdev->dev, struct simple_bridge,
+ bridge, &simple_bridge_bridge_funcs);
+ if (IS_ERR(sbridge))
+ return PTR_ERR(sbridge);
sbridge->info = of_device_get_match_data(&pdev->dev);
@@ -204,7 +205,6 @@ static int simple_bridge_probe(struct platform_device *pdev)
"Unable to retrieve enable GPIO\n");
/* Register the bridge. */
- sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
sbridge->bridge.of_node = pdev->dev.of_node;
sbridge->bridge.timings = sbridge->info->timings;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 5e5f8c2f95be..39332c57f2c5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -8,6 +8,7 @@
*/
#include <linux/completion.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
@@ -439,8 +440,8 @@ static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long lo
dw_hdmi_qp_set_cts_n(hdmi, cts, n);
}
-static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static int dw_hdmi_qp_audio_enable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
@@ -450,8 +451,8 @@ static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
return 0;
}
-static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+static int dw_hdmi_qp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
@@ -496,8 +497,8 @@ static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi)
AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
}
-static void dw_hdmi_qp_audio_disable(struct drm_connector *connector,
- struct drm_bridge *bridge)
+static void dw_hdmi_qp_audio_disable(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
@@ -875,7 +876,7 @@ static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
}
static enum drm_connector_status
-dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge)
+dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
@@ -1045,9 +1046,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(-ENODEV);
}
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi_qp, bridge,
+ &dw_hdmi_qp_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return ERR_CAST(hdmi);
hdmi->dev = dev;
@@ -1073,7 +1075,6 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(ret);
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
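The HDMI audio helper callbacks swap their first two parameters so the bridge comes first, consistent with every other drm_bridge_funcs hook. One handler under the new prototype, all names hypothetical:

struct my_hdmi {
        struct drm_bridge bridge;
};

#define to_my_hdmi(b) container_of(b, struct my_hdmi, bridge)

static int my_hdmi_audio_power(struct my_hdmi *hdmi, bool enable)
{
        return 0;       /* stand-in for the real register writes */
}

static int my_hdmi_audio_startup(struct drm_bridge *bridge,
                                 struct drm_connector *connector)
{
        struct my_hdmi *hdmi = to_my_hdmi(bridge);

        /* The connector now arrives second; the body is unchanged. */
        return my_hdmi_audio_power(hdmi, true);
}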
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 8791408dd1ff..206b099a35e9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
@@ -2977,7 +2978,8 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
mutex_unlock(&hdmi->mutex);
}
-static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+dw_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct dw_hdmi *hdmi = bridge->driver_private;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b08ada920a50..8fc2e282ff11 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
@@ -1194,9 +1195,10 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct dw_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return ERR_PTR(-ENOMEM);
+ dsi = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi, bridge,
+ &dw_mipi_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return ERR_CAST(dsi);
dsi->dev = dev;
dsi->plat_data = plat_data;
@@ -1265,7 +1267,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
dsi->bridge.of_node = pdev->dev.of_node;
return dsi;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index c76f5f2e74d1..5926a3a05d79 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -914,9 +915,10 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
struct dw_mipi_dsi2 *dsi2;
int ret;
- dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
- if (!dsi2)
- return ERR_PTR(-ENOMEM);
+ dsi2 = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi2, bridge,
+ &dw_mipi_dsi2_bridge_funcs);
+ if (IS_ERR(dsi2))
+ return ERR_CAST(dsi2);
dsi2->dev = dev;
dsi2->plat_data = plat_data;
@@ -981,7 +983,6 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
}
dsi2->bridge.driver_private = dsi2;
- dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
dsi2->bridge.of_node = pdev->dev.of_node;
return dsi2;
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index edf01476f2ef..98df3e667d4a 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -265,9 +265,10 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
struct tc358762 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358762, bridge,
+ &tc358762_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -288,7 +289,6 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3f76c890fad9..084e9d898e22 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -347,9 +347,10 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
struct tc358764 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358764, bridge,
+ &tc358764_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -368,7 +369,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 7e5449fb86a3..4097fef4b86b 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -344,6 +344,14 @@
#define COLOR_BAR_MODE_BARS 2
#define PLL_DBG 0x0a04
+enum tc_mode {
+ mode_dpi_to_edp = BIT(1) | BIT(2),
+ mode_dpi_to_dp = BIT(1),
+ mode_dsi_to_edp = BIT(0) | BIT(2),
+ mode_dsi_to_dp = BIT(0),
+ mode_dsi_to_dpi = BIT(0) | BIT(1),
+};
+
static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
@@ -1752,7 +1760,8 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
.get_modes = tc_connector_get_modes,
};
-static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+tc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
bool conn;
@@ -1777,7 +1786,7 @@ tc_connector_detect(struct drm_connector *connector, bool force)
struct tc_data *tc = connector_to_tc(connector);
if (tc->hpd_pin >= 0)
- return tc_bridge_detect(&tc->bridge);
+ return tc_bridge_detect(&tc->bridge, connector);
if (tc->panel_bridge)
return connector_status_connected;
@@ -2327,7 +2336,6 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
if (bridge) {
tc->panel_bridge = bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
- tc->bridge.funcs = &tc_dpi_bridge_funcs;
return 0;
}
@@ -2360,7 +2368,6 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
}
- tc->bridge.funcs = &tc_edp_bridge_funcs;
if (tc->hpd_pin >= 0)
tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2368,17 +2375,11 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
return 0;
}
-static int tc_probe_bridge_endpoint(struct tc_data *tc)
+static enum tc_mode tc_probe_get_mode(struct device *dev)
{
- struct device *dev = tc->dev;
struct of_endpoint endpoint;
struct device_node *node = NULL;
- const u8 mode_dpi_to_edp = BIT(1) | BIT(2);
- const u8 mode_dpi_to_dp = BIT(1);
- const u8 mode_dsi_to_edp = BIT(0) | BIT(2);
- const u8 mode_dsi_to_dp = BIT(0);
- const u8 mode_dsi_to_dpi = BIT(0) | BIT(1);
- u8 mode = 0;
+ enum tc_mode mode = 0;
/*
* Determine bridge configuration.
@@ -2401,7 +2402,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+ }
+
+ if (mode != mode_dpi_to_edp &&
+ mode != mode_dpi_to_dp &&
+ mode != mode_dsi_to_dpi &&
+ mode != mode_dsi_to_edp &&
+ mode != mode_dsi_to_dp) {
+ dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
+ return -EINVAL;
+ }
+
+ return mode;
+}
+
+static int tc_probe_bridge_endpoint(struct tc_data *tc, enum tc_mode mode)
+{
+ struct device *dev = tc->dev;
+ struct of_endpoint endpoint;
+ struct device_node *node = NULL;
+
+ for_each_endpoint_of_node(dev->of_node, node) {
+ of_graph_parse_endpoint(node, &endpoint);
if (endpoint.port == 2) {
of_property_read_u8_array(node, "toshiba,pre-emphasis",
tc->pre_emphasis,
@@ -2427,24 +2449,28 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return tc_probe_edp_bridge_endpoint(tc);
}
- dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
-
+ /* Should never happen; the mode was validated by tc_probe_get_mode() */
return -EINVAL;
}
static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
+ const struct drm_bridge_funcs *funcs;
struct tc_data *tc;
+ int mode;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ mode = tc_probe_get_mode(dev);
+ funcs = (mode == mode_dsi_to_dpi) ? &tc_dpi_bridge_funcs : &tc_edp_bridge_funcs;
+
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge, funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
- ret = tc_probe_bridge_endpoint(tc);
+ ret = tc_probe_bridge_endpoint(tc, mode);
if (ret)
return ret;
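
The tc358767 change above is the one conversion in this series that needed real restructuring: since the funcs table is now bound at allocation time, the OF-graph walk that decides between the DPI and (e)DP variants is hoisted into tc_probe_get_mode() and run before the alloc. Note that the hunk above passes a possibly negative mode straight through and relies on tc_probe_bridge_endpoint() to reject it later; the sketch below, with hypothetical "bar" names, shows the same select-before-alloc pattern with the error handled up front:

#include <drm/drm_bridge.h>
#include <linux/err.h>
#include <linux/i2c.h>

#define BAR_MODE_DSI_TO_DPI	1	/* hypothetical mode id */

struct bar {
	struct drm_bridge bridge;
};

static const struct drm_bridge_funcs bar_dpi_funcs = { };
static const struct drm_bridge_funcs bar_edp_funcs = { };

/* Stand-in for the OF-graph walk; returns a mode id or -EINVAL. */
static int bar_get_mode(struct device *dev)
{
	return BAR_MODE_DSI_TO_DPI;
}

static int bar_probe(struct i2c_client *client)
{
	const struct drm_bridge_funcs *funcs;
	struct bar *bar;
	int mode;

	/* The funcs table is fixed at allocation time, so anything
	 * needed to choose it must be probed before the alloc. */
	mode = bar_get_mode(&client->dev);
	if (mode < 0)
		return mode;

	funcs = (mode == BAR_MODE_DSI_TO_DPI) ? &bar_dpi_funcs
					      : &bar_edp_funcs;

	bar = devm_drm_bridge_alloc(&client->dev, struct bar, bridge, funcs);
	if (IS_ERR(bar))
		return PTR_ERR(bar);

	return 0;
}
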
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 063f217a17b6..fbdc44e16229 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1287,9 +1287,10 @@ static int tc358768_i2c_probe(struct i2c_client *client)
if (!np)
return -ENODEV;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tc358768_priv, bridge,
+ &tc358768_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
priv->dev = dev;
@@ -1321,7 +1322,6 @@ static int tc358768_i2c_probe(struct i2c_client *client)
priv->dsi_host.dev = dev;
priv->dsi_host.ops = &tc358768_dsi_host_ops;
- priv->bridge.funcs = &tc358768_bridge_funcs;
priv->bridge.timings = &default_tc358768_timings;
priv->bridge.of_node = np;
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 1b10e6ee1724..366b12db0e7c 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -659,9 +659,10 @@ static int tc_probe(struct i2c_client *client)
struct tc_data *tc;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge,
+ &tc_bridge_funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
tc->i2c = client;
@@ -701,7 +702,6 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index e2fc78adebcf..2cb7cd0c0608 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -181,9 +181,10 @@ static int thc63_probe(struct platform_device *pdev)
struct thc63_dev *thc63;
int ret;
- thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
- if (!thc63)
- return -ENOMEM;
+ thc63 = devm_drm_bridge_alloc(&pdev->dev, struct thc63_dev, bridge,
+ &thc63_bridge_func);
+ if (IS_ERR(thc63))
+ return PTR_ERR(thc63);
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
@@ -208,7 +209,6 @@ static int thc63_probe(struct platform_device *pdev)
thc63->bridge.driver_private = thc63;
thc63->bridge.of_node = pdev->dev.of_node;
- thc63->bridge.funcs = &thc63_bridge_func;
thc63->bridge.timings = &thc63->timings;
drm_bridge_add(&thc63->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 47638d1c96ec..b07f7c9d5890 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -348,9 +348,10 @@ static int dlpc3433_probe(struct i2c_client *client)
struct dlpc *dlpc;
int ret;
- dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL);
- if (!dlpc)
- return -ENOMEM;
+ dlpc = devm_drm_bridge_alloc(dev, struct dlpc, bridge,
+ &dlpc_bridge_funcs);
+ if (IS_ERR(dlpc))
+ return PTR_ERR(dlpc);
dlpc->dev = dev;
@@ -365,7 +366,6 @@ static int dlpc3433_probe(struct i2c_client *client)
dev_set_drvdata(dev, dlpc);
i2c_set_clientdata(client, dlpc);
- dlpc->bridge.funcs = &dlpc_bridge_funcs;
dlpc->bridge.of_node = dev->of_node;
drm_bridge_add(&dlpc->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 834b42a4d31f..e3a8c0c0c945 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -453,23 +453,6 @@ DEFINE_SHOW_ATTRIBUTE(status);
* Auxiliary Devices (*not* AUX)
*/
-static void ti_sn65dsi86_uninit_aux(void *data)
-{
- auxiliary_device_uninit(data);
-}
-
-static void ti_sn65dsi86_delete_aux(void *data)
-{
- auxiliary_device_delete(data);
-}
-
-static void ti_sn65dsi86_aux_device_release(struct device *dev)
-{
- struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
-
- kfree(aux);
-}
-
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct auxiliary_device **aux_out,
const char *name)
@@ -477,34 +460,16 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct device *dev = pdata->dev;
const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
- int ret;
+ int id;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ id = (client->adapter->nr << 10) | client->addr;
+ aux = __devm_auxiliary_device_create(dev, KBUILD_MODNAME, name,
+ NULL, id);
if (!aux)
- return -ENOMEM;
-
- aux->name = name;
- aux->id = (client->adapter->nr << 10) | client->addr;
- aux->dev.parent = dev;
- aux->dev.release = ti_sn65dsi86_aux_device_release;
- device_set_of_node_from_dev(&aux->dev, dev);
- ret = auxiliary_device_init(aux);
- if (ret) {
- kfree(aux);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
- if (ret)
- return ret;
-
- ret = auxiliary_device_add(aux);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
- if (!ret)
- *aux_out = aux;
+ return -ENODEV;
- return ret;
+ *aux_out = aux;
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -1196,7 +1161,8 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
pm_runtime_put_sync(pdata->dev);
}
-static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+ti_sn_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
@@ -1758,24 +1724,15 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset));
}
-static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
- int ret;
-
- if (!test_bit(offset, pdata->gchip_output)) {
- dev_err(pdata->dev, "Ignoring GPIO set while input\n");
- return;
- }
val &= 1;
- ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
- BIT(SN_GPIO_OUTPUT_SHIFT + offset),
- val << (SN_GPIO_OUTPUT_SHIFT + offset));
- if (ret)
- dev_warn(pdata->dev,
- "Failed to set bridge GPIO %u: %d\n", offset, ret);
+ return regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
+ BIT(SN_GPIO_OUTPUT_SHIFT + offset),
+ val << (SN_GPIO_OUTPUT_SHIFT + offset));
}
static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
@@ -1879,7 +1836,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev,
pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input;
pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output;
pdata->gchip.get = ti_sn_bridge_gpio_get;
- pdata->gchip.set = ti_sn_bridge_gpio_set;
+ pdata->gchip.set_rv = ti_sn_bridge_gpio_set;
pdata->gchip.can_sleep = true;
pdata->gchip.names = ti_sn_bridge_gpio_names;
pdata->gchip.ngpio = SN_NUM_GPIOS;
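
Besides the allocator conversion, the ti-sn65dsi86 diff carries two independent cleanups: the hand-rolled auxiliary_device init/add/delete plumbing collapses into __devm_auxiliary_device_create() (the underscored variant takes an explicit id and, per the hunk above, returns NULL on failure), and the GPIO setter moves from the void .set callback to the int-returning .set_rv. A sketch of the latter convention, assuming a kernel recent enough to have the int-returning setter and using hypothetical "baz" names and register layout; returning the regmap error lets gpiolib see a failed I2C write instead of it being logged and dropped:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define BAZ_GPIO_OUT_REG	0x10	/* hypothetical register */

struct baz {
	struct gpio_chip gchip;
	struct regmap *regmap;
};

/* .set_rv returns an int, so a failed register write propagates
 * to gpiolib rather than being swallowed by a void .set callback. */
static int baz_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
	struct baz *baz = gpiochip_get_data(chip);

	return regmap_update_bits(baz->regmap, BAZ_GPIO_OUT_REG,
				  BIT(offset), (val & 1) << offset);
}

static void baz_gpio_setup(struct baz *baz)
{
	baz->gchip.set_rv = baz_gpio_set;	/* was: .set */
}
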
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index cca75443f012..27053d020df7 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -68,9 +68,10 @@ static int tdp158_probe(struct i2c_client *client)
struct tdp158 *tdp158;
struct device *dev = &client->dev;
- tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL);
- if (!tdp158)
- return -ENOMEM;
+ tdp158 = devm_drm_bridge_alloc(dev, struct tdp158, bridge,
+ &tdp158_bridge_funcs);
+ if (IS_ERR(tdp158))
+ return PTR_ERR(tdp158);
tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(tdp158->next))
@@ -89,7 +90,6 @@ static int tdp158_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable");
tdp158->bridge.of_node = dev->of_node;
- tdp158->bridge.funcs = &tdp158_bridge_funcs;
tdp158->bridge.driver_private = tdp158;
tdp158->dev = dev;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index e15d232ddbac..b80ee089f880 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -89,7 +89,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- return drm_bridge_detect(dvi->next_bridge);
+ return drm_bridge_detect(dvi->next_bridge, connector);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -341,14 +341,14 @@ static int tfp410_init(struct device *dev, bool i2c)
return -ENXIO;
}
- dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
- if (!dvi)
- return -ENOMEM;
+ dvi = devm_drm_bridge_alloc(dev, struct tfp410, bridge,
+ &tfp410_bridge_funcs);
+ if (IS_ERR(dvi))
+ return PTR_ERR(dvi);
dvi->dev = dev;
dev_set_drvdata(dev, dvi);
- dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 1c289051a598..dcf686c4e73d 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -77,6 +77,12 @@ static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
return connector_status_disconnected;
}
+static enum drm_connector_status
+tpd12s015_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ return tpd12s015_detect(bridge);
+}
+
static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -94,7 +100,7 @@ static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
.attach = tpd12s015_attach,
.detach = tpd12s015_detach,
- .detect = tpd12s015_detect,
+ .detect = tpd12s015_bridge_detect,
.hpd_enable = tpd12s015_hpd_enable,
.hpd_disable = tpd12s015_hpd_disable,
};
@@ -116,13 +122,13 @@ static int tpd12s015_probe(struct platform_device *pdev)
struct gpio_desc *gpio;
int ret;
- tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
- if (!tpd)
- return -ENOMEM;
+ tpd = devm_drm_bridge_alloc(&pdev->dev, struct tpd12s015_device,
+ bridge, &tpd12s015_bridge_funcs);
+ if (IS_ERR(tpd))
+ return PTR_ERR(tpd);
platform_set_drvdata(pdev, tpd);
- tpd->bridge.funcs = &tpd12s015_bridge_funcs;
tpd->bridge.of_node = pdev->dev.of_node;
tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
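
Several hunks above track a drm_bridge_funcs API change: .detect now receives the struct drm_connector being probed, and drm_bridge_detect() callers (as in ti-tfp410) pass it through. A bridge whose detection logic does not need the connector keeps its existing helper behind a thin wrapper, exactly as ti-tpd12s015 does; a sketch with a hypothetical "qux" bridge:

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

static enum drm_connector_status qux_detect(struct drm_bridge *bridge)
{
	/* Hypothetical HPD read; unconditionally connected here. */
	return connector_status_connected;
}

/* Thin adapter for the two-argument .detect: the connector is
 * available to bridges that need it and simply ignored here. */
static enum drm_connector_status
qux_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	return qux_detect(bridge);
}

static const struct drm_bridge_funcs qux_bridge_funcs = {
	.detect = qux_bridge_detect,
};
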
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
index caa2f4804ed5..eddb5f782a5e 100644
--- a/drivers/gpu/drm/ci/build-igt.sh
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -71,4 +71,4 @@ tar -cf artifacts/igt.tar /igt
# Pass needed files to the test stage
S3_ARTIFACT_NAME="igt.tar.gz"
gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
-s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/
+ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 6fb74c51abe2..ac5e7ed195cf 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -113,17 +113,6 @@ mkdir -p install/modules/
INSTALL_MOD_PATH=install/modules/ make modules_install
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
- make Image.lzma
- mkimage \
- -f auto \
- -A arm \
- -O linux \
- -d arch/arm64/boot/Image.lzma \
- -C lzma\
- -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /kernel/cheza-kernel
- KERNEL_IMAGE_NAME+=" cheza-kernel"
-
# Make a gzipped copy of the Image for db410c.
gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
@@ -148,13 +137,13 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- s3_upload /kernel/$f \
- https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
done
S3_ARTIFACT_NAME="kernel-files.tar.zst"
tar --zstd -cf $S3_ARTIFACT_NAME install
- s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
fi
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 8eb56ebcf4aa..af27ff5de369 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -13,7 +13,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm/configs/multi_v7_defconfig"
KERNEL_IMAGE_NAME: "zImage"
@@ -24,7 +24,7 @@
- .build
- .use-debian/arm64_build
tags:
- - aarch64
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
variables:
DEFCONFIG: "arch/arm64/configs/defconfig"
KERNEL_IMAGE_NAME: "Image"
@@ -44,16 +44,22 @@
igt:arm32:
extends: .build:arm32
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:arm64:
extends: .build:arm64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
igt:x86_64:
extends: .build:x86_64
+ variables:
+ GIT_DEPTH: 10
script:
- FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
diff --git a/drivers/gpu/drm/ci/check-devicetrees.yml b/drivers/gpu/drm/ci/check-devicetrees.yml
new file mode 100644
index 000000000000..727bd56018b8
--- /dev/null
+++ b/drivers/gpu/drm/ci/check-devicetrees.yml
@@ -0,0 +1,50 @@
+.dt-check-base:
+ stage: static-checks
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ SCHEMA: "display:gpu"
+ VENV_PATH: "/tmp/dtcheck-venv"
+ before_script:
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION} python3-dev python3-venv python3-pip yamllint
+ - python3 -m venv "${VENV_PATH}"
+ - source "${VENV_PATH}/bin/activate"
+ - pip3 install dtschema
+ script:
+ - drivers/gpu/drm/ci/${SCRIPT_NAME}
+ artifacts:
+ when: on_failure
+ paths:
+ - ${ARTIFACT_FILE}
+ allow_failure:
+ exit_codes:
+ - 102
+
+dtbs-check:arm32:
+ extends:
+ - .build:arm32
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dtbs-check:arm64:
+ extends:
+ - .build:arm64
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dtbs-check.sh"
+ ARTIFACT_FILE: "dtbs-check.log"
+
+dt-binding-check:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ - .dt-check-base
+ variables:
+ SCRIPT_NAME: "dt-binding-check.sh"
+ ARTIFACT_FILE: "dt-binding-check.log"
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 56c95c2f91ae..5f90508578a3 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -20,31 +20,15 @@ debian/arm64_build:
EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5"
# Disable container jobs that we won't use
-alpine/x86_64_build:
- rules:
- - when: never
-
-debian/arm32_test-base:
- rules:
- - when: never
-
-debian/arm32_test-gl:
- rules:
- - when: never
-
-debian/arm32_test-vk:
- rules:
- - when: never
-
-debian/arm64_test-gl:
+debian/arm64_test-vk:
rules:
- when: never
-debian/arm64_test-vk:
+debian/baremetal_arm32_test-gl:
rules:
- when: never
-debian/baremetal_arm32_test:
+debian/baremetal_arm64_test-vk:
rules:
- when: never
@@ -64,19 +48,19 @@ debian/x86_64_test-android:
rules:
- when: never
-debian/x86_64_test-vk:
+debian/x86_64_test-video:
rules:
- when: never
-fedora/x86_64_build:
+debian/x86_64_test-vk:
rules:
- when: never
-debian/android_build:
+fedora/x86_64_build:
rules:
- when: never
-.debian/x86_64_test-android:
+debian/android_build:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/dt-binding-check.sh b/drivers/gpu/drm/ci/dt-binding-check.sh
new file mode 100755
index 000000000000..99e1c0df84b7
--- /dev/null
+++ b/drivers/gpu/drm/ci/dt-binding-check.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+VENV_PATH="${VENV_PATH:-/tmp/dtschema-venv}"
+source "${VENV_PATH}/bin/activate"
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" dt_binding_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dt-binding-check.log; then
+ echo "ERROR: 'make dt_binding_check' failed. Please check dt-binding-check.log for details."
+ exit 1
+fi
+
+if [[ -s dt-binding-check.log ]]; then
+ echo "WARNING: dt_binding_check reported warnings. Please check dt-binding-check.log" \
+ "for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/dtbs-check.sh b/drivers/gpu/drm/ci/dtbs-check.sh
new file mode 100755
index 000000000000..57842c452439
--- /dev/null
+++ b/drivers/gpu/drm/ci/dtbs-check.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+make LLVM=1 ARCH="${KERNEL_ARCH}" defconfig
+
+if ! make -j"${FDO_CI_CONCURRENT:-4}" ARCH="${KERNEL_ARCH}" LLVM=1 dtbs_check \
+ DT_SCHEMA_FILES="${SCHEMA:-}" 2>dtbs-check.log; then
+ echo "ERROR: 'make dtbs_check' failed. Please check dtbs-check.log for details."
+ exit 1
+fi
+
+if [[ -s dtbs-check.log ]]; then
+ echo "WARNING: dtbs_check reported warnings. Please check dtbs-check.log for details."
+ exit 102
+fi
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index ba75b3a7eca4..d502d146b177 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,17 +1,17 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha f73132f1215a37ce8ffc711a0136c90649aaf128
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 02337aec715c25dae7ff2479d986f831c77fe536
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af
+ IGT_VERSION: 129d5b10baaadea1d6cd6377341c4cb42e7ee6fd
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
- MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb
DRM_CI_PROJECT_URL: https://gitlab.freedesktop.org/${DRM_CI_PROJECT_PATH}
CI_PRE_CLONE_SCRIPT: |-
set -o xtrace
@@ -20,10 +20,8 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
- S3_JWT_HEADER_FILE: /s3_jwt_header
S3_JWT_FILE_SCRIPT: |-
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
- echo -n "Authorization: Bearer ${S3_JWT}" > '${S3_JWT_HEADER_FILE}' &&
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
@@ -38,7 +36,11 @@ variables:
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
LAVA_TAGS: subset-1-gfx
- LAVA_JOB_PRIORITY: 30
+ # Default priority for non-merge pipelines
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: "" # Empty tags are ignored by gitlab
+ FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: kvm
+ FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: aarch64
+ JOB_PRIORITY: 30
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
# Python scripts for structured logger
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
@@ -73,15 +75,12 @@ default:
include:
- project: 'freedesktop/ci-templates'
- ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
- file:
- - '/templates/ci-fairy.yml'
- - project: 'freedesktop/ci-templates'
ref: *ci-templates-commit
file:
- '/templates/alpine.yml'
- '/templates/debian.yml'
- '/templates/fedora.yml'
+ - '/templates/ci-fairy.yml'
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
@@ -105,20 +104,26 @@ include:
- '/src/microsoft/ci/gitlab-ci-inc.yml'
- '/src/nouveau/ci/gitlab-ci-inc.yml'
- '/src/virtio/ci/gitlab-ci-inc.yml'
+ - 'docs/gitlab-ci.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
- drivers/gpu/drm/ci/static-checks.yml
- drivers/gpu/drm/ci/build.yml
- drivers/gpu/drm/ci/test.yml
+ - drivers/gpu/drm/ci/check-devicetrees.yml
+ - drivers/gpu/drm/ci/kunit.yml
- 'https://gitlab.freedesktop.org/gfx-ci/lab-status/-/raw/main/lab-status.yml'
stages:
- sanity
- container
+ - deploy
- git-archive
- build-for-tests
- build-only
+ - static-checks
+ - kunit
- code-validation
- amdgpu
- i915
@@ -232,16 +237,20 @@ stages:
- _build/meson-logs/strace
+python-artifacts:
+ variables:
+ GIT_DEPTH: 10
+
+
# Git archive
-make git archive:
+make-git-archive:
extends:
- .fdo.ci-fairy
stage: git-archive
rules:
- !reference [.scheduled_pipeline-rules, rules]
- # ensure we are running on packet
tags:
- - packet.net
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
script:
# Remove drm-ci files we just added
- rm -rf .gitlab-ci.*
@@ -253,7 +262,7 @@ make git archive:
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
# Use id_tokens for JWT auth
- - s3_upload ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/
+ - ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
# Sanity checks of MR settings and commit logs
@@ -261,6 +270,8 @@ sanity:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- if: *is-pre-merge
when: on_success
@@ -279,7 +290,6 @@ sanity:
DEBIAN_BUILD_TAG
DEBIAN_PYUTILS_TAG
DEBIAN_TEST_GL_TAG
- KERNEL_ROOTFS_TAG
KERNEL_TAG
PKG_REPO_REV
)
@@ -295,14 +305,14 @@ sanity:
when: on_failure
reports:
junit: check-*.xml
- tags:
- - placeholder-job
mr-label-maker-test:
extends:
- .fdo.ci-fairy
stage: sanity
+ tags:
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
rules:
- !reference [.mr-label-maker-rules, rules]
variables:
@@ -325,3 +335,15 @@ mr-label-maker-test:
optional: true
- job: toml-lint
optional: true
+
+deploy-docs:
+ rules:
+ - when: never
+
+linkcheck-docs:
+ rules:
+ - when: never
+
+test-docs:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 2a0599f12c58..b24d4bc53cda 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -19,6 +19,7 @@ set +e
cat /sys/kernel/debug/dri/*/state
set -e
+mkdir -p /lib/modules
case "$DRIVER_NAME" in
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 53fe34b86578..7acc2e2a8eaa 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,24 +1,18 @@
variables:
- CONTAINER_TAG: "20250328-mesa-uprev"
- DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ CONTAINER_TAG: "20250502-mesa-uprev"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
-
- DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
- KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+ DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
# default kernel for rootfs before injecting the current kernel tree
- KERNEL_TAG: "v6.13-rc4-mesa-5e77"
+ KERNEL_TAG: "v6.14-mesa-0bdd"
KERNEL_REPO: "gfx-ci/linux"
- PKG_REPO_REV: "bca9635d"
-
- DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
- DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
- DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
+ PKG_REPO_REV: "95bf62c"
- DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils"
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
+ ALPINE_X86_64_BUILD_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
- CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a
+ CONDITIONAL_BUILD_ANGLE_TAG: 384145a4023315dae658259bee07c43a
+ CONDITIONAL_BUILD_PIGLIT_TAG: a19e424b8a3f020dbf1b9dd29f220a4f
diff --git a/drivers/gpu/drm/ci/kunit.sh b/drivers/gpu/drm/ci/kunit.sh
new file mode 100755
index 000000000000..7a1052fd3f17
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -euxo pipefail
+
+: "${KERNEL_ARCH:?ERROR: KERNEL_ARCH must be set}"
+: "${LLVM_VERSION:?ERROR: LLVM_VERSION must be set}"
+
+./drivers/gpu/drm/ci/setup-llvm-links.sh
+
+export PATH="/usr/bin:$PATH"
+
+./tools/testing/kunit/kunit.py run \
+ --arch "${KERNEL_ARCH}" \
+ --make_options LLVM=1 \
+ --kunitconfig=drivers/gpu/drm/tests
diff --git a/drivers/gpu/drm/ci/kunit.yml b/drivers/gpu/drm/ci/kunit.yml
new file mode 100644
index 000000000000..0d5b2c4433d2
--- /dev/null
+++ b/drivers/gpu/drm/ci/kunit.yml
@@ -0,0 +1,37 @@
+.kunit-packages: &kunit-packages
+ - apt-get update -qq
+ # Minimum supported version of LLVM for building x86 kernels is 15.0.0.
+ # In mesa-ci containers, LLVM_VERSION is defined as a container-level property and is currently set to 19.
+ - apt-get install -y --no-install-recommends clang-${LLVM_VERSION} lld-${LLVM_VERSION} llvm-${LLVM_VERSION}
+
+.kunit-base:
+ stage: kunit
+ timeout: "30m"
+ variables:
+ GIT_DEPTH: 1
+ script:
+ - drivers/gpu/drm/ci/kunit.sh
+
+kunit:arm32:
+ extends:
+ - .build:arm32
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-arm
+
+kunit:arm64:
+ extends:
+ - .build:arm64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-aarch64
+
+kunit:x86_64:
+ extends:
+ - .build:x86_64
+ - .kunit-base
+ before_script:
+ - *kunit-packages
+ - apt-get install -y --no-install-recommends qemu-system-x86
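
The new kunit jobs boot a kernel under QEMU for each architecture via tools/testing/kunit/kunit.py and run whatever suites the drivers/gpu/drm/tests kunitconfig enables. For reference, a minimal KUnit suite of the kind those jobs execute looks like the following (a hypothetical example suite, not one of the actual DRM tests):

#include <kunit/test.h>

static void drm_ci_example_test(struct kunit *test)
{
	/* A failed expectation fails the case and is reported by kunit.py. */
	KUNIT_EXPECT_EQ(test, 2 + 2, 4);
}

static struct kunit_case drm_ci_example_cases[] = {
	KUNIT_CASE(drm_ci_example_test),
	{}
};

static struct kunit_suite drm_ci_example_suite = {
	.name = "drm-ci-example",
	.test_cases = drm_ci_example_cases,
};
kunit_test_suite(drm_ci_example_suite);
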
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index a1e8b34fb2d4..a295102c3468 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -41,7 +41,6 @@ section_start prepare_rootfs "Preparing root filesystem"
set -ex
-section_switch rootfs "Assembling root filesystem"
ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
[ $? != 1 ] || exit 1
@@ -54,7 +53,7 @@ cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-s3_upload job-rootfs-overlay.tar.gz "https://${JOB_ARTIFACTS_BASE}"
+ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
# Prepare env vars for upload.
section_switch variables "Environment variables passed through to device:"
@@ -64,6 +63,9 @@ section_switch lava_submit "Submitting job for scheduling"
touch results/lava.log
tail -f results/lava.log &
+# Ensure that we are printing the commands that are being executed,
+# making it easier to debug the job in case it fails.
+set -x
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--farm "${FARM}" \
--device-type "${DEVICE_TYPE}" \
diff --git a/drivers/gpu/drm/ci/setup-llvm-links.sh b/drivers/gpu/drm/ci/setup-llvm-links.sh
new file mode 100755
index 000000000000..ace33af82a3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/setup-llvm-links.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: MIT
+set -euo pipefail
+
+ln -svf "$(which clang++-${LLVM_VERSION})" /usr/bin/clang++
+ln -svf "$(which clang-${LLVM_VERSION})" /usr/bin/clang
+ln -svf "$(which ld.lld-${LLVM_VERSION})" /usr/bin/ld.lld
+ln -svf "$(which lld-${LLVM_VERSION})" /usr/bin/lld
+ln -svf "$(which llvm-ar-${LLVM_VERSION})" /usr/bin/llvm-ar
+ln -svf "$(which llvm-nm-${LLVM_VERSION})" /usr/bin/llvm-nm
+ln -svf "$(which llvm-objcopy-${LLVM_VERSION})" /usr/bin/llvm-objcopy
+ln -svf "$(which llvm-readelf-${LLVM_VERSION})" /usr/bin/llvm-readelf
+ln -svf "$(which llvm-strip-${LLVM_VERSION})" /usr/bin/llvm-strip
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 84a25f0e783b..81147e86bfd0 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -24,7 +24,7 @@
.lava-igt:arm32:
extends:
- - .lava-test:arm32
+ - .lava-arm32-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "armhf"
@@ -33,15 +33,14 @@
- testing:arm32
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm32
- - debian/x86_64_build
+ - debian/arm32_test-gl
- python-artifacts
- testing:arm32
- igt:arm32
.lava-igt:arm64:
extends:
- - .lava-test:arm64
+ - .lava-arm64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "arm64"
@@ -50,15 +49,14 @@
- testing:arm64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_arm64
- - debian/x86_64_build
+ - debian/arm64_test-gl
- python-artifacts
- testing:arm64
- igt:arm64
.lava-igt:x86_64:
extends:
- - .lava-test:x86_64
+ - .lava-x86_64-test-gl
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
DEBIAN_ARCH: "amd64"
@@ -67,16 +65,15 @@
- testing:x86_64
needs:
- alpine/x86_64_lava_ssh_client
- - kernel+rootfs_x86_64
- - debian/x86_64_build
+ - debian/x86_64_test-gl
- python-artifacts
- testing:x86_64
- igt:x86_64
.baremetal-igt-arm64:
extends:
- - .baremetal-test-arm64
- - .use-debian/baremetal_arm64_test
+ - .baremetal-test-arm64-gl
+ - .use-debian/baremetal_arm64_test-gl
- .allow_failure_lockdep
timeout: "1h30m"
rules:
@@ -91,7 +88,7 @@
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
FARM: google
needs:
- - debian/baremetal_arm64_test
+ - debian/baremetal_arm64_test-gl
- job: testing:arm64
artifacts: false
- igt:arm64
@@ -101,19 +98,21 @@
.software-driver:
stage: software-driver
extends:
+ - .test-gl
- .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
- when: on_success
- extends:
- - .test-gl
tags:
- - kvm
+ - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM
+ before_script:
+ - !reference [default, before_script]
+ - rm -rf install
+ - tar -xf artifacts/install.tar
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /kernel/bzImage
- - mkdir -p /lib/modules
- install/crosvm-runner.sh install/igt_runner.sh
needs:
- debian/x86_64_test-gl
@@ -180,20 +179,6 @@ msm:apq8096:
script:
- ./install/bare-metal/fastboot.sh || exit $?
-msm:sdm845:
- extends:
- - .baremetal-igt-arm64
- stage: msm
- parallel: 6
- variables:
- DEVICE_TYPE: sdm845-cheza-r3
- DRIVER_NAME: msm
- BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/cheza-kernel
- GPU_VERSION: sdm845
- RUNNER_TAG: google-freedreno-cheza
- script:
- - ./install/bare-metal/cros-servo.sh || exit $?
-
msm:sm8350-hdk:
extends:
- .lava-igt:arm64
@@ -323,7 +308,7 @@ i915:cml:
variables:
DEVICE_TYPE: asus-C436FA-Flip-hatch
GPU_VERSION: cml
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-flip-hatch
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-Flip-hatch
i915:tgl:
extends:
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index d4b8ba3a54a9..154b047787b2 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -32,3 +32,8 @@ kms_display_modes@mst-extended-mode-negative
# It causes other tests to fail, so skip it.
kms_invalid_mode@overflow-vrefresh
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 022db559cc7d..a9bb3e1ad75c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -23,3 +23,8 @@ core_hotunplug.*
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# sc7180 does not have APRIV, so memptrs is not protected.
+# (Preemption is not supported on devices that do not have
+# APRIV, so this is ok)
+msm/msm_mapping@memptrs
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
deleted file mode 100644
index 7a2ab58b706f..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-kms_color@ctm-0-25,Fail
-kms_color@ctm-0-50,Fail
-kms_color@ctm-0-75,Fail
-kms_color@ctm-blue-to-red,Fail
-kms_color@ctm-green-to-red,Fail
-kms_color@ctm-negative,Fail
-kms_color@ctm-red-to-blue,Fail
-kms_color@ctm-signed,Fail
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
-kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@cursor-vs-flip-toggle,Fail
-kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_cursor_legacy@flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@coverage-7efc,Fail
-kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@overlay,Fail
-kms_plane_cursor@viewport,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
deleted file mode 100644
index e32d73c6c98e..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ /dev/null
@@ -1,139 +0,0 @@
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-atomic
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-legacy
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-after-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@basic-flip-before-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@flip-vs-cursor-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-after-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
-msm/msm_shrink@copy-gpu-oom-32
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@short-flip-before-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-kms_cursor_legacy@flip-vs-cursor-toggle
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-gf13702b8e
-# Linux Version: 6.10.0-rc5
-msm/msm_shrink@copy-mmap-oom-8
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_lease@page-flip-implicit-plane
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
-# Failure Rate: 50
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc5
-kms_flip@flip-vs-expired-vblank
-
-# Board Name: sdm845-cheza-r3
-# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75
-# Failure Rate: 20
-# IGT Version: 1.30-g04bedb923
-# Linux Version: 6.14.0-rc4
-kms_flip@plain-flip-ts-check-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
deleted file mode 100644
index 6c86d1953e11..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ /dev/null
@@ -1,350 +0,0 @@
-# Hangs machine
-kms_bw.*
-
-# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
-# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm/msm_mapping@*
-
-# Skip driver specific tests
-^amdgpu.*
-nouveau_.*
-^panfrost.*
-^v3d.*
-^vc4.*
-^vmwgfx*
-
-# Skip intel specific tests
-gem_.*
-i915_.*
-tools_test.*
-kms_dp_link_training.*
-
-# Currently fails and causes coverage loss for other tests
-# since core_getversion also fails.
-core_hotunplug.*
-
-# Whole machine hangs
-kms_cursor_crc.*
-
-# IGT test crash
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.11.0-rc2
-kms_content_protection@uevent
-
-# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
-# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
-kms_display_modes@extended-mode-basic
-kms_display_modes@mst-extended-mode-negative
-
-# Kernel panic
-msm/msm_recovery@hangcheck
-# DEBUG - Begin test msm/msm_recovery@hangcheck
-# Console: switching to colour dummy device 80x25
-# [ 489.526286] [IGT] msm_recovery: executing
-# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck
-# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU
-# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179
-# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8)
-# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
-# [ 515.934727] Tainted: [W]=WARN
-# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT)
-# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
-# [ 515.934751] pc : rcu_core+0x59c/0xe68
-# [ 515.934769] lr : rcu_core+0x74/0xe68
-# [ 515.934781] sp : ffff800080003e50
-# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002
-# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000
-# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0
-# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70
-# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000
-# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000
-# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000
-# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0
-# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000
-# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0
-# [ 515.934939] Call trace:
-# [ 515.934945] rcu_core+0x59c/0xe68 (P)
-# [ 515.934962] rcu_core_si+0x10/0x1c
-# [ 515.934976] handle_softirqs+0x118/0x4b8
-# [ 515.934994] __do_softirq+0x14/0x20
-# [ 515.935007] ____do_softirq+0x10/0x1c
-# [ 515.935021] call_on_irq_stack+0x24/0x4c
-# [ 515.935034] do_softirq_own_stack+0x1c/0x28
-# [ 515.935048] __irq_exit_rcu+0x174/0x1b4
-# [ 515.935063] irq_exit_rcu+0x10/0x38
-# [ 515.935077] el1_interrupt+0x38/0x64
-# [ 515.935092] el1h_64_irq_handler+0x18/0x24
-# [ 515.935104] el1h_64_irq+0x6c/0x70
-# [ 515.935115] lock_acquire+0x1e0/0x338 (P)
-# [ 515.935129] __mutex_lock+0xa8/0x4b8
-# [ 515.935144] mutex_lock_nested+0x24/0x30
-# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc
-# [ 515.935174] _find_key+0x64/0x16c
-# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74
-# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128
-# [ 515.935211] __cpufreq_driver_target+0x144/0x29c
-# [ 515.935227] sugov_work+0x58/0x74
-# [ 515.935239] kthread_worker_fn+0xf4/0x324
-# [ 515.935254] kthread+0x12c/0x208
-# [ 515.935266] ret_from_fork+0x10/0x20
-# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
-# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
-# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
-# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126]
-# [ 540.124439] Modules linked in:
-# [ 540.124448] irq event stamp: 9912389
-# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130
-# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64
-# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8
-# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20
-# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
-# [ 540.124540] Tainted: [W]=WARN
-# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT)
-# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
-# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c
-# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c
-# [ 540.124581] sp : ffff800080003c20
-# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0
-# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006
-# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001
-# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50
-# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000
-# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000
-# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000
-# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f
-# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080
-# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280
-# [ 540.124731] Call trace:
-# [ 540.124736] xhci_urb_enqueue+0xbc/0x32c (P)
-# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc
-# [ 540.124766] usb_submit_urb+0x294/0x560
-# [ 540.124780] intr_callback+0x78/0x1fc
-# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128
-# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140
-# [ 540.124825] process_one_work+0x208/0x5e8
-# [ 540.124840] bh_worker+0x1a8/0x20c
-# [ 540.124853] workqueue_softirq_action+0x78/0x88
-# [ 540.124868] tasklet_hi_action+0x14/0x3c
-# [ 540.124883] handle_softirqs+0x118/0x4b8
-# [ 540.124897] __do_softirq+0x14/0x20
-# [ 540.124908] ____do_softirq+0x10/0x1c
-# [ 540.124922] call_on_irq_stack+0x24/0x4c
-# [ 540.124934] do_softirq_own_stack+0x1c/0x28
-# [ 540.124947] __irq_exit_rcu+0x174/0x1b4
-# [ 540.124961] irq_exit_rcu+0x10/0x38
-# [ 540.124976] el1_interrupt+0x38/0x64
-# [ 540.124987] el1h_64_irq_handler+0x18/0x24
-# [ 540.124998] el1h_64_irq+0x6c/0x70
-# [ 540.125009] lock_acquire+0x1e0/0x338 (P)
-# [ 540.125023] __mutex_lock+0xa8/0x4b8
-# [ 540.125038] mutex_lock_nested+0x24/0x30
-# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc
-# [ 540.125067] _find_key+0x64/0x16c
-# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74
-# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128
-# [ 540.125105] __cpufreq_driver_target+0x144/0x29c
-# [ 540.125121] sugov_work+0x58/0x74
-# [ 540.125133] kthread_worker_fn+0xf4/0x324
-# [ 540.125148] kthread+0x12c/0x208
-# [ 540.125160] ret_from_fork+0x10/0x20
-# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks
-# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1
-# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP
-# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT)
-# [ 540.443022] Call trace:
-# [ 540.445559] show_stack+0x18/0x24 (C)
-# [ 540.449357] dump_stack_lvl+0x38/0xd0
-# [ 540.453157] dump_stack+0x18/0x24
-# [ 540.456599] panic+0x3bc/0x41c
-# [ 540.459767] watchdog_timer_fn+0x254/0x2e4
-# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440
-# [ 540.468508] hrtimer_interrupt+0xe4/0x244
-# [ 540.472662] arch_timer_handler_phys+0x2c/0x44
-# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0
-# [ 540.481943] handle_irq_desc+0x40/0x58
-# [ 540.485829] generic_handle_domain_irq+0x1c/0x28
-# [ 540.490604] gic_handle_irq+0x4c/0x11c
-# [ 540.494483] do_interrupt_handler+0x50/0x84
-# [ 540.498811] el1_interrupt+0x34/0x64
-# [ 540.502518] el1h_64_irq_handler+0x18/0x24
-# [ 540.506758] el1h_64_irq+0x6c/0x70
-# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P)
-# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc
-# [ 540.518932] usb_submit_urb+0x294/0x560
-# [ 540.522901] intr_callback+0x78/0x1fc
-# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128
-# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140
-# [ 540.535614] process_one_work+0x208/0x5e8
-# [ 540.539769] bh_worker+0x1a8/0x20c
-# [ 540.543293] workqueue_softirq_action+0x78/0x88
-# [ 540.547980] tasklet_hi_action+0x14/0x3c
-# [ 540.552038] handle_softirqs+0x118/0x4b8
-# [ 540.556096] __do_softirq+0x14/0x20
-# [ 540.559705] ____do_softirq+0x10/0x1c
-# [ 540.563500] call_on_irq_stack+0x24/0x4c
-# [ 540.567554] do_softirq_own_stack+0x1c/0x28
-# [ 540.571878] __irq_exit_rcu+0x174/0x1b4
-# [ 540.575849] irq_exit_rcu+0x10/0x38
-# [ 540.579462] el1_interrupt+0x38/0x64
-# [ 540.583158] el1h_64_irq_handler+0x18/0x24
-# [ 540.587397] el1h_64_irq+0x6c/0x70
-# [ 540.590918] lock_acquire+0x1e0/0x338 (P)
-# [ 540.595060] __mutex_lock+0xa8/0x4b8
-# [ 540.598760] mutex_lock_nested+0x24/0x30
-# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc
-# [ 540.607503] _find_key+0x64/0x16c
-# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74
-# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128
-# [ 540.620924] __cpufreq_driver_target+0x144/0x29c
-# [ 540.625698] sugov_work+0x58/0x74
-# [ 540.629134] kthread_worker_fn+0xf4/0x324
-# [ 540.633278] kthread+0x12c/0x208
-# [ 540.636619] ret_from_fork+0x10/0x20
-# [ 540.640321] SMP: stopping secondary CPUs
-# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000
-# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000
-# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b
-# [ 540.660829] Memory Limit: none
-# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index e17265039ca8..72480db1f00d 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
@@ -31,6 +34,10 @@ MODULE_PARM_DESC(active,
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n");
+ return;
+ }
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (!strcmp(drm_client_default, "fbdev")) {
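The new DRIVER_MODESET guard lets drivers call drm_client_setup() unconditionally from their probe path; render-only devices now just emit a debug message instead of registering fbdev or other clients. A minimal sketch of the caller side (hypothetical driver names, assuming the usual drm_dev_alloc()/drm_dev_register() flow):

	static int example_probe(struct platform_device *pdev)
	{
		struct drm_device *drm;
		int ret;

		drm = drm_dev_alloc(&example_driver, &pdev->dev);
		if (IS_ERR(drm))
			return PTR_ERR(drm);

		ret = drm_dev_register(drm, 0);
		if (ret)
			return ret;

		/* NULL lets the helper pick the driver's preferred format */
		drm_client_setup(drm, NULL);

		return 0;
	}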
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 8d22b7627d41..df09cf9a8ca1 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -8,6 +8,7 @@ config DRM_DISPLAY_DP_AUX_BUS
config DRM_DISPLAY_HELPER
tristate
depends on DRM
+ select CEC_CORE if DRM_DISPLAY_DP_AUX_CEC || DRM_DISPLAY_HDMI_CEC_HELPER || CEC_NOTIFIER
help
DRM helpers for display adapters.
@@ -16,6 +17,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_CEC_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -23,7 +25,6 @@ config DRM_BRIDGE_CONNECTOR
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
@@ -82,6 +83,16 @@ config DRM_DISPLAY_HDMI_AUDIO_HELPER
DRM display helpers for HDMI Audio functionality (generic HDMI Codec
implementation).
+config DRM_DISPLAY_HDMI_CEC_HELPER
+ bool
+ help
+ DRM display helpers for HDMI CEC implementation.
+
+config DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER
+ def_bool CEC_NOTIFIER
+ help
+ DRM display helpers for HDMI CEC notifiers implementation.
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index b17879b957d5..0ff4a1ad0222 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -16,6 +16,10 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
drm_hdmi_audio_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_HELPER) += \
+ drm_hdmi_cec_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER) += \
+ drm_hdmi_cec_notifier_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 262e93e07a28..5eb7e9bfe361 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -3,6 +3,7 @@
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -20,6 +21,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -113,6 +115,13 @@ struct drm_bridge_connector {
* &DRM_BRIDGE_OP_DP_AUDIO).
*/
struct drm_bridge *bridge_dp_audio;
+ /**
+ * @bridge_hdmi_cec:
+ *
+ * The bridge in the chain that implements CEC support, if any (see
+ * DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER).
+ */
+ struct drm_bridge *bridge_hdmi_cec;
};
#define to_drm_bridge_connector(x) \
@@ -201,7 +210,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
if (detect) {
- status = detect->funcs->detect(detect);
+ status = detect->funcs->detect(detect, connector);
if (hdmi)
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
@@ -454,7 +463,7 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
if (!bridge->funcs->hdmi_audio_startup)
return 0;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ return bridge->funcs->hdmi_audio_startup(bridge, connector);
}
if (bridge_connector->bridge_dp_audio) {
@@ -463,7 +472,7 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
if (!bridge->funcs->dp_audio_startup)
return 0;
- return bridge->funcs->dp_audio_startup(connector, bridge);
+ return bridge->funcs->dp_audio_startup(bridge, connector);
}
return -EINVAL;
@@ -480,13 +489,13 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
if (bridge_connector->bridge_hdmi_audio) {
bridge = bridge_connector->bridge_hdmi_audio;
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ return bridge->funcs->hdmi_audio_prepare(bridge, connector, fmt, hparms);
}
if (bridge_connector->bridge_dp_audio) {
bridge = bridge_connector->bridge_dp_audio;
- return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms);
+ return bridge->funcs->dp_audio_prepare(bridge, connector, fmt, hparms);
}
return -EINVAL;
@@ -500,12 +509,12 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
if (bridge_connector->bridge_hdmi_audio) {
bridge = bridge_connector->bridge_hdmi_audio;
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ bridge->funcs->hdmi_audio_shutdown(bridge, connector);
}
if (bridge_connector->bridge_dp_audio) {
bridge = bridge_connector->bridge_dp_audio;
- bridge->funcs->dp_audio_shutdown(connector, bridge);
+ bridge->funcs->dp_audio_shutdown(bridge, connector);
}
}
@@ -522,7 +531,7 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
if (!bridge->funcs->hdmi_audio_mute_stream)
return -ENOTSUPP;
- return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ return bridge->funcs->hdmi_audio_mute_stream(bridge, connector,
enable, direction);
}
@@ -532,7 +541,7 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
if (!bridge->funcs->dp_audio_mute_stream)
return -ENOTSUPP;
- return bridge->funcs->dp_audio_mute_stream(connector, bridge,
+ return bridge->funcs->dp_audio_mute_stream(bridge, connector,
enable, direction);
}
@@ -546,6 +555,65 @@ static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_aud
.mute_stream = drm_bridge_connector_audio_mute_stream,
};
+static int drm_bridge_connector_hdmi_cec_enable(struct drm_connector *connector, bool enable)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_enable(bridge, enable);
+}
+
+static int drm_bridge_connector_hdmi_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_log_addr(bridge, logical_addr);
+}
+
+static int drm_bridge_connector_hdmi_cec_transmit(struct drm_connector *connector,
+ u8 attempts,
+ u32 signal_free_time,
+ struct cec_msg *msg)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_transmit(bridge, attempts,
+ signal_free_time,
+ msg);
+}
+
+static int drm_bridge_connector_hdmi_cec_init(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ if (!bridge->funcs->hdmi_cec_init)
+ return 0;
+
+ return bridge->funcs->hdmi_cec_init(bridge, connector);
+}
+
+static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = {
+ .init = drm_bridge_connector_hdmi_cec_init,
+ .enable = drm_bridge_connector_hdmi_cec_enable,
+ .log_addr = drm_bridge_connector_hdmi_cec_log_addr,
+ .transmit = drm_bridge_connector_hdmi_cec_transmit,
+};
+
/* -----------------------------------------------------------------------------
* Bridge Connector Initialisation
*/
@@ -662,6 +730,25 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
bridge_connector->bridge_dp_audio = bridge;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+
+ if (!bridge->funcs->hdmi_cec_enable ||
+ !bridge->funcs->hdmi_cec_log_addr ||
+ !bridge->funcs->hdmi_cec_transmit)
+ return ERR_PTR(-EINVAL);
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -720,12 +807,33 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
ret = drm_connector_hdmi_audio_init(connector, dev,
&drm_bridge_connector_hdmi_audio_funcs,
bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_i2s_formats,
bridge->hdmi_audio_spdif_playback,
bridge->hdmi_audio_dai_port);
if (ret)
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ ret = drmm_connector_hdmi_cec_notifier_register(connector,
+ NULL,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ ret = drmm_connector_hdmi_cec_register(connector,
+ &drm_bridge_connector_hdmi_cec_funcs,
+ bridge->hdmi_cec_adapter_name,
+ bridge->hdmi_cec_available_las,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
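For reference, a hedged sketch of the bridge side that the forwarding above assumes: a hypothetical HDMI bridge advertises DRM_BRIDGE_OP_HDMI_CEC_ADAPTER and supplies the three callbacks that drm_bridge_connector_init() checks for (callback bodies are stubs here, all example_* names are illustrative):

	static int example_cec_enable(struct drm_bridge *bridge, bool enable)
	{
		/* switch the controller's CEC block on or off */
		return 0;
	}

	static int example_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr)
	{
		/* latch the logical address into the CEC block */
		return 0;
	}

	static int example_cec_transmit(struct drm_bridge *bridge, u8 attempts,
					u32 signal_free_time, struct cec_msg *msg)
	{
		/* queue msg for transmission on the CEC line */
		return 0;
	}

	static const struct drm_bridge_funcs example_bridge_funcs = {
		/* ... the usual attach/atomic hooks ... */
		.hdmi_cec_enable = example_cec_enable,
		.hdmi_cec_log_addr = example_cec_log_addr,
		.hdmi_cec_transmit = example_cec_transmit,
	};

	/* at probe time, alongside the other DRM_BRIDGE_OP_* flags: */
	bridge->ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
	bridge->hdmi_cec_dev = dev;
	bridge->hdmi_cec_adapter_name = "example-cec";
	bridge->hdmi_cec_available_las = 4;	/* illustrative */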
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 718c9122bc3a..2d279e82922f 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -12,6 +12,7 @@
* to perform transactions on that bus.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index ed31471bd0e2..3b50d817c839 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -5,6 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index ea78c6c8ca7a..1ecc3df7e316 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/dynamic_debug.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/iopoll.h>
@@ -692,6 +693,34 @@ void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
/**
+ * drm_dp_dpcd_set_probe() - Set whether a probe is done before DPCD access
+ * @aux: DisplayPort AUX channel
+ * @enable: Enable the probing if required
+ */
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable)
+{
+ WRITE_ONCE(aux->dpcd_probe_disabled, !enable);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_probe);
+
+static bool dpcd_access_needs_probe(struct drm_dp_aux *aux)
+{
+ /*
+ * HP ZR24w corrupts the first DPCD access after entering power save
+ * mode. Eg. on a read, the entire buffer will be filled with the same
+ * byte. Do a throw away read to avoid corrupting anything we care
+ * about. Afterwards things will work correctly until the monitor
+ * gets woken up and subsequently re-enters power save mode.
+ *
+ * The user pressing any button on the monitor is enough to wake it
+ * up, so there is no particularly good place to do the workaround.
+ * We just have to do it before any DPCD access and hope that the
+ * monitor doesn't power down exactly after the throw away read.
+ */
+ return !aux->is_remote && !READ_ONCE(aux->dpcd_probe_disabled);
+}
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -712,19 +741,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
{
int ret;
- /*
- * HP ZR24w corrupts the first DPCD access after entering power save
- * mode. Eg. on a read, the entire buffer will be filled with the same
- * byte. Do a throw away read to avoid corrupting anything we care
- * about. Afterwards things will work correctly until the monitor
- * gets woken up and subsequently re-enters power save mode.
- *
- * The user pressing any button on the monitor is enough to wake it
- * up, so there is no particularly good place to do the workaround.
- * We just have to do it before any DPCD access and hope that the
- * monitor doesn't power down exactly after the throw away read.
- */
- if (!aux->is_remote) {
+ if (dpcd_access_needs_probe(aux)) {
ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
if (ret < 0)
return ret;
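With the probe now gated on dpcd_probe_disabled, a source driver that knows its sink is always reachable (an eDP panel, for instance) can opt out of the throw-away read and save one AUX transaction per series of DPCD accesses. A hedged one-line sketch, assuming "aux" is the driver's AUX channel:

	/* eDP panels are awake whenever the driver talks to them */
	drm_dp_dpcd_set_probe(aux, false);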
@@ -3940,23 +3957,31 @@ EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
* Returns: %0 on success, negative error code on failure
*/
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level)
+ u32 level)
{
int ret;
- u8 buf[2] = { 0 };
+ unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
+ u8 buf[3] = { 0 };
/* The panel uses the PWM for controlling brightness levels */
- if (!bl->aux_set)
+ if (!(bl->aux_set || bl->luminance_set))
return 0;
- if (bl->lsb_reg_used) {
+ if (bl->luminance_set) {
+ level = level * 1000;
+ level &= 0xffffff;
+ buf[0] = (level & 0x0000ff);
+ buf[1] = (level & 0x00ff00) >> 8;
+ buf[2] = (level & 0xff0000) >> 16;
+ offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ } else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
} else {
buf[0] = level;
}
- ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
@@ -4019,7 +4044,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
* Returns: %0 on success, negative error code on failure.
*/
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- const u16 level)
+ const u32 level)
{
int ret;
u8 dpcd_buf;
@@ -4029,6 +4054,9 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
else
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
+ if (bl->luminance_set)
+ dpcd_buf |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+
if (bl->pwmgen_bit_count) {
ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
if (ret < 0)
@@ -4192,7 +4220,7 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 *current_mode)
{
int ret;
- u8 buf[2];
+ u8 buf[3];
u8 mode_reg;
ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
@@ -4209,17 +4237,37 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret < 0) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
- aux->name, ret);
- return ret;
- }
+ if (bl->luminance_set) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ buf, sizeof(buf));
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
- if (bl->lsb_reg_used)
- return (buf[0] << 8) | buf[1];
- else
- return buf[0];
+ /*
+ * In case luminance is set, we want to send the value back in nits,
+ * but since DP_EDP_PANEL_TARGET_LUMINANCE_VALUE stores values in
+ * millinits, we need to divide by 1000.
+ */
+ return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000;
+ } else {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ buf, size);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Failed to read backlight level: %d\n",
+ aux->name, ret);
+ return ret;
+ }
+
+ if (bl->lsb_reg_used)
+ return (buf[0] << 8) | buf[1];
+ else
+ return buf[0];
+ }
}
/*
@@ -4234,10 +4282,12 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
* interface.
* @aux: The DP aux device to use for probing
* @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight
+ * @max_luminance: maximum luminance in nits, used when @need_luminance is true
* @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz
* @edp_dpcd: A cached copy of the eDP DPCD
* @current_level: Where to store the probed brightness level, if any
* @current_mode: Where to store the currently set backlight control mode
+ * @need_luminance: Tells us if we want to control the backlight using luminance values
*
* Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight capabilities,
* along with also probing the current and maximum supported brightness levels.
@@ -4249,8 +4299,9 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
*/
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode)
+ u32 *current_level, u8 *current_mode, bool need_luminance)
{
int ret;
@@ -4260,18 +4311,26 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl
bl->aux_set = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
bl->lsb_reg_used = true;
+ if ((edp_dpcd[0] & DP_EDP_15) && edp_dpcd[3] &
+ (DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) && need_luminance)
+ bl->luminance_set = true;
/* Sanity check caps */
- if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+ if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP) &&
+ !bl->luminance_set) {
drm_dbg_kms(aux->drm_dev,
- "%s: Panel supports neither AUX or PWM brightness control? Aborting\n",
+ "%s: Panel does not support AUX, PWM or luminance-based brightness control. Aborting\n",
aux->name);
return -EINVAL;
}
- ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
- if (ret < 0)
- return ret;
+ if (bl->luminance_set) {
+ bl->max = max_luminance;
+ } else {
+ ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
+ if (ret < 0)
+ return ret;
+ }
ret = drm_edp_backlight_probe_state(aux, bl, current_mode);
if (ret < 0)
@@ -4350,7 +4409,7 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
{
struct dp_aux_backlight *bl;
struct backlight_properties props = { 0 };
- u16 current_level;
+ u32 current_level;
u8 current_mode;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
int ret;
@@ -4374,8 +4433,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
bl->aux = aux;
- ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(aux, &bl->info, 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
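Putting the new pieces together, a hedged sketch of luminance-based backlight bring-up under the extended drm_edp_backlight_init() signature; the 500 nits maximum is an assumed panel value (in practice it would come from EDID or a panel quirk):

	struct drm_edp_backlight_info bl_info;
	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
	u32 current_level;
	u8 current_mode;
	int ret;

	ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
				    EDP_DISPLAY_CTL_CAP_SIZE);
	if (ret < 0)
		return ret;

	ret = drm_edp_backlight_init(aux, &bl_info, 500 /* nits */, 0,
				     edp_dpcd, &current_level, &current_mode,
				     true /* need_luminance */);
	if (ret < 0)
		return ret;

	/*
	 * With bl_info.luminance_set, levels are in nits: writing 250 becomes
	 * 250 * 1000 = 250000 millinits = 0x03d090, sent LSB-first as
	 * { 0x90, 0xd0, 0x03 } to DP_EDP_PANEL_TARGET_LUMINANCE_VALUE.
	 */
	ret = drm_edp_backlight_set_level(aux, &bl_info, 250);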
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index a89f38fd3218..64e5c176d5cc 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -23,6 +23,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 076edf161048..43f13a7c79b9 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/types.h>
@@ -1920,7 +1921,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
- ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");
#endif
for (i = 0; i < max_group_count; i++) {
diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c
index 6900f4dac520..05996c526a8a 100644
--- a/drivers/gpu/drm/display/drm_dsc_helper.c
+++ b/drivers/gpu/drm/display/drm_dsc_helper.c
@@ -6,6 +6,7 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
index ae8a0cf595fc..7d78b02c1446 100644
--- a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -3,6 +3,7 @@
* Copyright (c) 2024 Linaro Ltd
*/
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -143,6 +144,7 @@ static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
* @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
* @funcs: callbacks for this HDMI Codec
* @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @i2s_formats: set of I2S formats (use 0 for a bus-specific set)
* @spdif_playback: set if HDMI codec has S/PDIF playback port
* @dai_port: sound DAI port, -1 if it is not enabled
*
@@ -155,6 +157,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int dai_port)
{
@@ -162,6 +165,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
.ops = &drm_connector_hdmi_audio_ops,
.max_i2s_channels = max_i2s_playback_channels,
.i2s = !!max_i2s_playback_channels,
+ .i2s_formats = i2s_formats,
.spdif = spdif_playback,
.no_i2s_capture = true,
.no_spdif_capture = true,
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
new file mode 100644
index 000000000000..3651ad0f76e0
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+
+struct drm_connector_hdmi_cec_data {
+ struct cec_adapter *adapter;
+ const struct drm_connector_hdmi_cec_funcs *funcs;
+};
+
+static int drm_connector_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->enable(connector, enable);
+}
+
+static int drm_connector_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->log_addr(connector, logical_addr);
+}
+
+static int drm_connector_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->transmit(connector, attempts, signal_free_time, msg);
+}
+
+static const struct cec_adap_ops drm_connector_hdmi_cec_adap_ops = {
+ .adap_enable = drm_connector_hdmi_cec_adap_enable,
+ .adap_log_addr = drm_connector_hdmi_cec_adap_log_addr,
+ .adap_transmit = drm_connector_hdmi_cec_adap_transmit,
+};
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_invalidate(struct drm_connector *connector)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_phys_addr_invalidate(data->adapter);
+}
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_s_phys_addr(data->adapter, addr, false);
+}
+
+static void drm_connector_hdmi_cec_adapter_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_unregister_adapter(data->adapter);
+
+ if (data->funcs->uninit)
+ data->funcs->uninit(connector);
+
+ kfree(data);
+ connector->cec.data = NULL;
+}
+
+static struct drm_connector_cec_funcs drm_connector_hdmi_cec_adapter_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_adapter_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_adapter_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev)
+{
+ struct drm_connector_hdmi_cec_data *data;
+ struct cec_connector_info conn_info;
+ struct cec_adapter *cec_adap;
+ int ret;
+
+ if (!funcs->init || !funcs->enable || !funcs->log_addr || !funcs->transmit)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->funcs = funcs;
+
+ cec_adap = cec_allocate_adapter(&drm_connector_hdmi_cec_adap_ops, connector, name,
+ CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
+ available_las ? : CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec_adap);
+ if (ret < 0)
+ goto err_free;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(cec_adap, &conn_info);
+
+ data->adapter = cec_adap;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = data;
+ connector->cec.funcs = &drm_connector_hdmi_cec_adapter_funcs;
+
+ ret = funcs->init(connector);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ /*
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
+ *
+ * However, the CEC framework cleans up the CEC adapter only when the
+ * last user has closed its file descriptor, so we don't need to handle
+ * it in DRM.
+ *
+ * Before that, the CEC framework makes sure that even if userspace
+ * still holds the CEC device open, all calls will be short-circuited
+ * via cec_is_registered(), making sure that there is no access to
+ * the freed memory.
+ */
+ ret = cec_register_adapter(cec_adap, dev);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_adapter_unregister,
+ connector);
+
+err_delete_adapter:
+ cec_delete_adapter(cec_adap);
+
+ connector->cec.data = NULL;
+
+ mutex_unlock(&connector->cec.mutex);
+
+err_free:
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_register);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_received_msg(data->adapter, msg);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_received_msg);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_attempt_done(data->adapter, status);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_attempt_done);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_done(data->adapter, status,
+ arb_lost_cnt, nack_cnt, low_drive_cnt, error_cnt);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_done);
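A hedged sketch of how a connector driver might sit on top of these helpers: registration supplies the four mandatory callbacks (init, enable, log_addr, transmit), and the device's interrupt handler feeds events back through the wrappers above. All example_* names are hypothetical:

	ret = drmm_connector_hdmi_cec_register(connector,
					       &example_cec_funcs,
					       "example-cec",
					       CEC_MAX_LOG_ADDRS,
					       dev);
	if (ret)
		return ret;

	/* later, in the device's CEC interrupt handler: */
	static irqreturn_t example_cec_irq(int irq, void *data)
	{
		struct example_priv *priv = data;
		struct cec_msg msg;

		if (example_hw_rx_pending(priv)) {	/* hypothetical */
			example_hw_read_msg(priv, &msg);
			drm_connector_hdmi_cec_received_msg(priv->connector, &msg);
		}

		if (example_hw_tx_done(priv))		/* hypothetical */
			drm_connector_hdmi_cec_transmit_attempt_done(priv->connector,
								     CEC_TX_STATUS_OK);

		return IRQ_HANDLED;
	}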
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
new file mode 100644
index 000000000000..31b8e4a93e24
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_invalidate(struct drm_connector *connector)
+{
+ cec_notifier_phys_addr_invalidate(connector->cec.data);
+}
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ cec_notifier_set_phys_addr(connector->cec.data, addr);
+}
+
+static void drm_connector_hdmi_cec_notifier_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+
+ cec_notifier_conn_unregister(connector->cec.data);
+ connector->cec.data = NULL;
+}
+
+static const struct drm_connector_cec_funcs drm_connector_cec_notifier_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_notifier_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_notifier_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(dev, port_name, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = notifier;
+ connector->cec.funcs = &drm_connector_cec_notifier_funcs;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_notifier_unregister,
+ connector);
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_notifier_register);
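The notifier path covers hardware where the display bridge only tracks the physical address and a separate CEC controller, with its own driver, does the actual signalling. A hedged sketch, assuming the bridge set DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER and pointed hdmi_cec_dev at the device the CEC driver looks up:

	ret = drmm_connector_hdmi_cec_notifier_register(connector,
							NULL, /* default port */
							bridge->hdmi_cec_dev);
	if (ret)
		return ret;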
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 855cb02b827d..a237dc55805d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -44,7 +45,7 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
- connector->hdr_sink_metadata.hdmi_type1.eotf))
+ connector->display_info.hdr_sink_metadata.hdmi_type1.eotf))
DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index d9d9948b29e9..a561f124be99 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -407,6 +411,11 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return false;
}
+ if (drm_mode_is_420_only(info, mode) && format != HDMI_COLORSPACE_YUV420) {
+ drm_dbg_kms(dev, "Mode can be only supported in YUV420 format.\n");
+ return false;
+ }
+
switch (format) {
case HDMI_COLORSPACE_RGB:
drm_dbg_kms(dev, "RGB Format, checking the constraints.\n");
@@ -437,9 +446,36 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return true;
case HDMI_COLORSPACE_YUV420:
- /* TODO: YUV420 is unsupported at the moment. */
- drm_dbg_kms(dev, "YUV420 format isn't supported yet.\n");
- return false;
+ drm_dbg_kms(dev, "YUV420 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR420)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV420.\n");
+ return false;
+ }
+
+ if (!drm_mode_is_420(info, mode)) {
+ drm_dbg_kms(dev, "Mode cannot be supported in YUV420 format.\n");
+ return false;
+ }
+
+ if (bpc == 10 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ if (bpc == 16 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)) {
+ drm_dbg_kms(dev, "16 BPC but sink doesn't support Deep Color 48.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "YUV420 format supported in that configuration.\n");
+
+ return true;
case HDMI_COLORSPACE_YUV422:
drm_dbg_kms(dev, "YUV422 format, checking the constraints.\n");
@@ -545,8 +581,9 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret;
- drm_dbg_kms(dev, "Trying %s output format\n",
- drm_hdmi_connector_get_output_format_name(fmt));
+ drm_dbg_kms(dev, "Trying %s output format with %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
if (!sink_supports_format_bpc(connector, info, mode, fmt, bpc)) {
drm_dbg_kms(dev, "%s output format not supported with %u bpc\n",
@@ -563,7 +600,7 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
return false;
}
- drm_dbg_kms(dev, "%s output format supported with %u (TMDS char rate: %llu Hz)\n",
+ drm_dbg_kms(dev, "%s output format supported with %u bpc (TMDS char rate: %llu Hz)\n",
drm_hdmi_connector_get_output_format_name(fmt),
bpc, conn_state->hdmi.tmds_char_rate);
@@ -571,23 +608,35 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
}
static int
-hdmi_compute_format(const struct drm_connector *connector,
- struct drm_connector_state *conn_state,
- const struct drm_display_mode *mode,
- unsigned int bpc)
+hdmi_compute_format_bpc(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int max_bpc, enum hdmi_colorspace fmt)
{
struct drm_device *dev = connector->dev;
+ unsigned int bpc;
+ int ret;
+
+ for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
+ ret = hdmi_try_format_bpc(connector, conn_state, mode, bpc, fmt);
+ if (!ret)
+ continue;
+
+ conn_state->hdmi.output_bpc = bpc;
+ conn_state->hdmi.output_format = fmt;
+
+ drm_dbg_kms(dev,
+ "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
+ conn_state->hdmi.output_bpc,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate);
- /*
- * TODO: Add support for YCbCr420 output for HDMI 2.0 capable
- * devices, for modes that only support YCbCr420.
- */
- if (hdmi_try_format_bpc(connector, conn_state, mode, bpc, HDMI_COLORSPACE_RGB)) {
- conn_state->hdmi.output_format = HDMI_COLORSPACE_RGB;
return 0;
}
- drm_dbg_kms(dev, "Failed. No Format Supported for that bpc count.\n");
+ drm_dbg_kms(dev, "Failed. %s output format not supported for any bpc count.\n",
+ drm_hdmi_connector_get_output_format_name(fmt));
return -EINVAL;
}
@@ -597,33 +646,29 @@ hdmi_compute_config(const struct drm_connector *connector,
struct drm_connector_state *conn_state,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
unsigned int max_bpc = clamp_t(unsigned int,
conn_state->max_bpc,
8, connector->max_bpc);
- unsigned int bpc;
int ret;
- for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
- drm_dbg_kms(dev, "Trying with a %d bpc output\n", bpc);
-
- ret = hdmi_compute_format(connector, conn_state, mode, bpc);
- if (ret)
- continue;
-
- conn_state->hdmi.output_bpc = bpc;
-
- drm_dbg_kms(dev,
- "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
- mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
- conn_state->hdmi.output_bpc,
- drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
- conn_state->hdmi.tmds_char_rate);
-
- return 0;
+ ret = hdmi_compute_format_bpc(connector, conn_state, mode, max_bpc,
+ HDMI_COLORSPACE_RGB);
+ if (ret) {
+ if (connector->ycbcr_420_allowed) {
+ ret = hdmi_compute_format_bpc(connector, conn_state,
+ mode, max_bpc,
+ HDMI_COLORSPACE_YUV420);
+ if (ret)
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format doesn't work.\n");
+ } else {
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format not allowed for connector.\n");
+ ret = -EINVAL;
+ }
}
- return -EINVAL;
+ return ret;
}
static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
@@ -798,12 +843,12 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
if (!new_conn_state->crtc || !new_conn_state->best_encoder)
return 0;
- new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
-
ret = hdmi_compute_config(connector, new_conn_state, mode);
if (ret)
return ret;
+ new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+
ret = hdmi_generate_infoframes(connector, new_conn_state);
if (ret)
return ret;
@@ -1081,9 +1126,10 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
const struct drm_edid *drm_edid;
if (status == connector_status_disconnected) {
- // TODO: also handle CEC and scramber, HDMI sink disconnected.
+ // TODO: also handle scrambler, HDMI sink disconnected.
drm_connector_hdmi_audio_plugged_notify(connector, false);
drm_edid_connector_update(connector, NULL);
+ drm_connector_cec_phys_addr_invalidate(connector);
return;
}
@@ -1097,8 +1143,9 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
drm_edid_free(drm_edid);
if (status == connector_status_connected) {
- // TODO: also handle CEC and scramber, HDMI sink is now connected.
+ // TODO: also handle scrambler, HDMI sink is now connected.
drm_connector_hdmi_audio_plugged_notify(connector, true);
+ drm_connector_cec_phys_addr_set(connector);
}
}
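A worked trace of the resulting negotiation, for an illustrative sink whose TMDS limit rules RGB out for the requested mode and which lacks YUV420 deep-color support:

	RGB    @ 12 bpc -> TMDS character rate too high, rejected
	RGB    @ 10 bpc -> TMDS character rate too high, rejected
	RGB    @  8 bpc -> TMDS character rate too high, rejected
	YUV420 @ 12 bpc -> y420_dc_modes lacks DRM_EDID_YCBCR420_DC_36, rejected
	YUV420 @ 10 bpc -> y420_dc_modes lacks DRM_EDID_YCBCR420_DC_30, rejected
	YUV420 @  8 bpc -> fits, selected

That is, hdmi_compute_config() walks bpc down within a format before falling back from RGB to YUV420, and the drm_mode_is_420_only() check rejects 420-only modes for every format except YUV420.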
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 6d2f244e5830..df878aad4a36 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 0138cf0b8b63..cd15cf52f0c9 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -26,7 +26,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-
+#include <linux/export.h>
#include <linux/sync_file.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ee64ca1b1bec..ef56b474acf5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -25,6 +25,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>
@@ -1160,11 +1161,10 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
- struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
@@ -1224,9 +1224,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
else if (funcs->dpms)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-
- drm_atomic_bridge_chain_post_disable(bridge, state);
}
+}
+
+static void
+crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -1274,6 +1280,68 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+static void
+encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
+
+ if (new_conn_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_conn_state->crtc);
+ else
+ new_crtc_state = NULL;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /*
+ * We shouldn't get this far if we didn't previously have
+ * an encoder.. but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
+ }
+}
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ encoder_bridge_disable(dev, state);
+
+ crtc_disable(dev, state);
+
+ encoder_bridge_post_disable(dev, state);
+}
+
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
@@ -1483,28 +1551,44 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
-/**
- * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
- * @dev: DRM device
- * @state: atomic state object being committed
- *
- * This function enables all the outputs with the new configuration which had to
- * be turned off for the update.
- *
- * For compatibility with legacy CRTC helpers this should be called after
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *state)
+static void
+encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ if (!new_conn_state->best_encoder)
+ continue;
+
+ if (!new_conn_state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
+ continue;
+
+ encoder = new_conn_state->best_encoder;
+
+ drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
+ }
+}
+
+static void
+crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1528,6 +1612,14 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(crtc);
}
}
+}
+
+static void
+encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
@@ -1552,7 +1644,6 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1565,6 +1656,30 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_atomic_bridge_chain_enable(bridge, state);
}
+}
+
+/**
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy CRTC helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
+ */
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ encoder_bridge_pre_enable(dev, state);
+
+ crtc_enable(dev, state);
+
+ encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
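The enable path is thus split into three phases that mirror the disable path: bridge pre-enable, CRTC enable, then encoder/bridge enable. Drivers with their own commit tail keep calling the same entry point; a minimal sketch of such a tail, with foo_atomic_commit_tail being a hypothetical driver function:

static void foo_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;

        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_planes(dev, state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, state);
        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
}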
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 519228eb1095..7142e163e618 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c2726af6698e..ecc73d52bfae 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -36,6 +36,7 @@
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <linux/sync_file.h>
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 22aa015df387..a2556d16bed6 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -28,6 +28,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_auth.h>
@@ -95,7 +96,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -103,7 +104,6 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->master_mutex);
drm_dbg_core(dev, "%u\n", auth->magic);
@@ -118,13 +118,12 @@ int drm_authmagic(struct drm_device *dev, void *data,
drm_dbg_core(dev, "%u\n", auth->magic);
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
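The drm_auth.c hunks here and below replace explicit lock/unlock pairs with scoped guards from <linux/cleanup.h>: guard(mutex)() releases the lock on every return path, so the out_unlock labels disappear. A minimal sketch of the pattern, with foo_lock and foo_set_flag as hypothetical names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);

static int foo_set_flag(bool *flag)
{
        guard(mutex)(&foo_lock);        /* unlocked automatically on return */

        if (*flag)
                return -EBUSY;          /* no goto/unlock needed */

        *flag = true;
        return 0;
}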
@@ -248,41 +247,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
if (drm_is_current_master_locked(file_priv))
- goto out_unlock;
+ return ret;
- if (dev->master) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (dev->master)
+ return -EBUSY;
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!file_priv->master)
+ return -EINVAL;
- if (!file_priv->is_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
+ if (!file_priv->is_master)
+ return drm_new_set_master(dev, file_priv);
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to set lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_set_master(dev, file_priv, false);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -299,33 +290,27 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
- if (!drm_is_current_master_locked(file_priv)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!drm_is_current_master_locked(file_priv))
+ return -EINVAL;
- if (!dev->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!dev->master)
+ return -EINVAL;
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to drop lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_drop_master(dev, file_priv);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -337,7 +322,7 @@ int drm_master_open(struct drm_file *file_priv)
/* if there is no current master make this fd it, but do not create
* any master object for render clients
*/
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!dev->master) {
ret = drm_new_set_master(dev, file_priv);
} else {
@@ -345,7 +330,6 @@ int drm_master_open(struct drm_file *file_priv)
file_priv->master = drm_master_get(dev->master);
spin_unlock(&file_priv->master_lookup_lock);
}
- mutex_unlock(&dev->master_mutex);
return ret;
}
@@ -355,7 +339,7 @@ void drm_master_release(struct drm_file *file_priv)
struct drm_device *dev = file_priv->minor->dev;
struct drm_master *master;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
@@ -376,7 +360,6 @@ out:
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
- mutex_unlock(&dev->master_mutex);
}
/**
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b4c89ec01998..dd45d9b504d8 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -203,6 +204,8 @@ static void __drm_bridge_free(struct kref *kref)
{
struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+ if (bridge->funcs->destroy)
+ bridge->funcs->destroy(bridge);
kfree(bridge->container);
}
@@ -292,6 +295,11 @@ EXPORT_SYMBOL(__devm_drm_bridge_alloc);
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ if (!bridge->container)
+ DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
+
+ drm_bridge_get(bridge);
+
mutex_init(&bridge->hpd_mutex);
if (bridge->ops & DRM_BRIDGE_OP_HDMI)
@@ -339,6 +347,8 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
+
+ drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);
@@ -404,11 +414,17 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
if (!encoder || !bridge)
return -EINVAL;
- if (previous && (!previous->dev || previous->encoder != encoder))
- return -EINVAL;
+ drm_bridge_get(bridge);
- if (bridge->dev)
- return -EBUSY;
+ if (previous && (!previous->dev || previous->encoder != encoder)) {
+ ret = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ if (bridge->dev) {
+ ret = -EBUSY;
+ goto err_put_bridge;
+ }
bridge->dev = encoder->dev;
bridge->encoder = encoder;
@@ -457,6 +473,8 @@ err_reset_bridge:
"failed to attach bridge %pOF to encoder %s\n",
bridge->of_node, encoder->name);
+err_put_bridge:
+ drm_bridge_put(bridge);
return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -477,6 +495,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
list_del(&bridge->chain_node);
bridge->dev = NULL;
+ drm_bridge_put(bridge);
}
/**
@@ -1218,12 +1237,13 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
* The detection status on success, or connector_status_unknown if the bridge
* doesn't support output detection.
*/
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
return connector_status_unknown;
- return bridge->funcs->detect(bridge);
+ return bridge->funcs->detect(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);
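drm_bridge_detect() now forwards the connector to the bridge's .detect hook, so implementations can report per-connector status. A hedged sketch of the updated callback shape, with all foo_* names being hypothetical:

struct foo_bridge {
        struct drm_bridge bridge;
        bool hpd_asserted;
};

static enum drm_connector_status
foo_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
        struct foo_bridge *foo = container_of(bridge, struct foo_bridge, bridge);

        return foo->hpd_asserted ? connector_status_connected
                                 : connector_status_disconnected;
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
        .detect = foo_bridge_detect,
        /* other hooks omitted */
};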
@@ -1392,6 +1412,23 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+/**
+ * devm_drm_put_bridge - Release a bridge reference obtained via devm
+ * @dev: device that got the bridge via devm
+ * @bridge: pointer to a struct drm_bridge obtained via devm
+ *
+ * Same as drm_bridge_put() for bridge pointers obtained via devm functions
+ * such as devm_drm_bridge_alloc().
+ *
+ * This function is a temporary workaround and MUST NOT be used. Manual
+ * handling of bridge lifetime is inherently unsafe.
+ */
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
+{
+ devm_release_action(dev, drm_bridge_put_void, bridge);
+}
+EXPORT_SYMBOL(devm_drm_put_bridge);
+
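With drm_bridge_add()/drm_bridge_remove() and drm_bridge_attach()/drm_bridge_detach() now taking and dropping their own references, a bridge allocated with devm_drm_bridge_alloc() stays alive until the last user puts it. A probe-side sketch under those assumptions (foo_* hypothetical, funcs as in the earlier detect sketch):

static int foo_probe(struct platform_device *pdev)
{
        struct foo_bridge *foo;

        foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
                                    &foo_bridge_funcs);
        if (IS_ERR(foo))
                return PTR_ERR(foo);

        drm_bridge_add(&foo->bridge);   /* takes its own reference */
        platform_set_drvdata(pdev, foo);
        return 0;
}

static void foo_remove(struct platform_device *pdev)
{
        struct foo_bridge *foo = platform_get_drvdata(pdev);

        drm_bridge_remove(&foo->bridge);        /* drops that reference */
}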
static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
struct drm_bridge *bridge,
unsigned int idx)
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
index af80d2496194..420f29cf3e54 100644
--- a/drivers/gpu/drm/drm_bridge_helper.c
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 66aff35f8647..a94061f373de 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -5,6 +5,7 @@
#include <kunit/test-bug.h>
+#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index f1de7faf9fb4..3fa38d4ac70b 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -303,34 +304,17 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int
-drm_client_buffer_vmap(struct drm_client_buffer *buffer,
- struct iosys_map *map_copy)
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
int ret;
- drm_gem_lock(gem);
-
- ret = drm_gem_pin_locked(gem);
- if (ret)
- goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap_locked(gem, map);
+ ret = drm_gem_vmap(buffer->gem, &buffer->map);
if (ret)
- goto err_drm_gem_vmap;
-
- drm_gem_unlock(gem);
-
- *map_copy = *map;
+ return ret;
+ *map_copy = buffer->map;
return 0;
-
-err_drm_gem_vmap:
- drm_gem_unpin_locked(buffer->gem);
-err_drm_gem_pin_locked:
- drm_gem_unlock(gem);
- return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -344,13 +328,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
-
- drm_gem_lock(gem);
- drm_gem_vunmap_locked(gem, map);
- drm_gem_unpin_locked(gem);
- drm_gem_unlock(gem);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
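drm_client_buffer_vmap() now goes through plain drm_gem_vmap(), which handles locking and pinning internally, so clients only pair it with drm_client_buffer_vunmap(). A usage fragment (the scanline clear is illustrative only):

struct iosys_map map;
int ret;

ret = drm_client_buffer_vmap(buffer, &map);
if (ret)
        return ret;

/* e.g. clear the first scanline of the client framebuffer */
iosys_map_memset(&map, 0, 0, buffer->fb->pitches[0]);

drm_client_buffer_vunmap(buffer);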
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index bd93cd93d519..c83196ad8b59 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 0f9d5ba36c81..9c2c3b0c8c47 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -8,6 +8,8 @@
*/
#include "drm/drm_modeset_lock.h"
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3969dc548cff..37a3270bc3c2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
@@ -28,6 +29,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+#include <kunit/visibility.h>
#include "drm_crtc_internal.h"
@@ -494,6 +496,7 @@ const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
return color_encoding_name[encoding];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_encoding_name);
/**
* drm_get_color_range_name - return a string for color range
@@ -509,6 +512,7 @@ const char *drm_get_color_range_name(enum drm_color_range range)
return color_range_name[range];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_range_name);
/**
* drm_plane_create_color_properties - color encoding related plane properties
@@ -630,3 +634,209 @@ int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
+
+/*
+ * Gamma-LUT programming
+ */
+
+/**
+ * drm_crtc_load_gamma_888 - Programs gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component.
+ */
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_gamma(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_888);
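These helpers take a drm_crtc_set_lut_func callback, so the format-specific iteration lives in core code while the register write stays in the driver. A hedged wiring sketch (foo_*, to_foo_device() and FOO_GAMMA_LUT() are hypothetical):

static void foo_set_gamma(struct drm_crtc *crtc, unsigned int index,
                          u16 r, u16 g, u16 b)
{
        struct foo_device *fdev = to_foo_device(crtc->dev);

        /* hardware stores 8 bits per component */
        writel(((r >> 8) << 16) | ((g >> 8) << 8) | (b >> 8),
               fdev->mmio + FOO_GAMMA_LUT(index));
}

/* in the CRTC's atomic_flush, assuming a 256-entry GAMMA_LUT property */
if (crtc->state->gamma_lut)
        drm_crtc_load_gamma_888(crtc, crtc->state->gamma_lut->data,
                                foo_set_gamma);
else
        drm_crtc_fill_gamma_888(crtc, foo_set_gamma);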
+
+/**
+ * drm_crtc_load_gamma_565_from_888 - Programs gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper interpolates
+ * the individual color components to reduce the number of entries to 5/6/5.
+ */
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 4 + i / 16].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i) {
+ g = lut[i * 4 + i / 16].green;
+ set_gamma(crtc, i, 0, g, 0);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_565_from_888);
+
+/**
+ * drm_crtc_load_gamma_555_from_888 - Programs gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper interpolates
+ * the individual color components to reduce the number of entries to 5/5/5.
+ */
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 8 + i / 4].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_555_from_888);
+
+static void fill_gamma_888(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 8) | r;
+ g = (g << 8) | g;
+ b = (b << 8) | b;
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_888 - Programs a default gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_gamma_888(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_888);
+
+static void fill_gamma_565(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 10) | (g << 4) | (g >> 2);
+ b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_565 - Programs a default gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_565(crtc, i, i, i, i, set_gamma);
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i)
+ fill_gamma_565(crtc, i, 0, i, 0, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_565);
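The fill_gamma_*() helpers expand an n-bit ramp index to the full 16-bit range by bit replication rather than a plain shift, so the ramp ends exactly at 0xffff. A standalone check of the 5-bit variant (replicate5 is a hypothetical helper, not part of the patch):

static u16 replicate5(u16 v)
{
        /* repeat the 5-bit pattern across all 16 bits */
        return (v << 11) | (v << 6) | (v << 1) | (v >> 4);
}

/* replicate5(0) == 0x0000 and replicate5(31) == 0xffff, whereas a bare
 * (v << 11) would top out at 0xf800. */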
+
+static void fill_gamma_555(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 11) | (g << 6) | (g << 1) | (g >> 4);
+ b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_555 - Programs a default gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_555(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_555);
+
+/*
+ * Color-LUT programming
+ */
+
+/**
+ * drm_crtc_load_palette_8 - Programs palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The palette to program
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs the palette specified in @lut to hardware. The input palette
+ * must have 256 entries per color component.
+ */
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_palette(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_palette_8);
+
+static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
+ drm_crtc_set_lut_func set_palette)
+{
+ u16 Y = (i << 8) | i; /* relative luminance */
+
+ set_palette(crtc, i, Y, Y, Y);
+}
+
+/**
+ * drm_crtc_fill_palette_8 - Programs a default palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs a default palette to hardware.
+ */
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_palette_8(crtc, i, set_palette);
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_8);
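For C8 scanout the callback shape is the same; only the semantics differ (palette entries instead of a gamma ramp). A flush-time fragment reusing the foo_set_gamma callback from the gamma sketch above:

/* when the primary plane scans out DRM_FORMAT_C8 */
if (crtc->state->gamma_lut)
        drm_crtc_load_palette_8(crtc, crtc->state->gamma_lut->data,
                                foo_set_gamma);
else
        drm_crtc_fill_palette_8(crtc, foo_set_gamma);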
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 48b08c9611a7..272d6254ea47 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -279,6 +280,7 @@ static int drm_connector_init_only(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->cec.mutex);
mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
@@ -702,6 +704,46 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_connector_cec_phys_addr_invalidate - invalidate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Invalidates the CEC physical address set for this DRM connector.
+ */
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector)
+{
+ mutex_lock(&connector->cec.mutex);
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_invalidate)
+ connector->cec.funcs->phys_addr_invalidate(connector);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_invalidate);
+
+/**
+ * drm_connector_cec_phys_addr_set - propagate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Propagate CEC physical address from the display_info to this DRM connector.
+ */
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector)
+{
+ u16 addr;
+
+ mutex_lock(&connector->cec.mutex);
+
+ addr = connector->display_info.source_physical_address;
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_set)
+ connector->cec.funcs->phys_addr_set(connector, addr);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_set);
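These helpers route physical-address updates through the new connector->cec.funcs table under cec.mutex. A hedged implementation sketch against the CEC framework, where the foo_* names are hypothetical and the hook table type is assumed to be struct drm_connector_cec_funcs; cec_s_phys_addr() and cec_phys_addr_invalidate() are the standard media/cec.h entry points:

static void foo_cec_phys_addr_set(struct drm_connector *connector, u16 addr)
{
        struct foo_cec *cec = connector_to_foo_cec(connector);

        cec_s_phys_addr(cec->adap, addr, false);
}

static void foo_cec_phys_addr_invalidate(struct drm_connector *connector)
{
        struct foo_cec *cec = connector_to_foo_cec(connector);

        cec_phys_addr_invalidate(cec->adap);
}

static const struct drm_connector_cec_funcs foo_cec_funcs = {
        .phys_addr_set = foo_cec_phys_addr_set,
        .phys_addr_invalidate = foo_cec_phys_addr_invalidate,
};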
+
+/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
@@ -1645,7 +1687,7 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* structure from userspace. This is received as blob and stored in
* &drm_connector_state.hdr_output_metadata. It parses EDID and saves the
* sink metadata in &struct hdr_sink_metadata, as
- * &drm_connector.hdr_sink_metadata. Driver uses
+ * &drm_connector.display_info.hdr_sink_metadata. Driver uses
* drm_hdmi_infoframe_set_hdr_metadata() helper to set the HDR metadata,
* hdmi_drm_infoframe_pack() to pack the infoframe as per spec, in case of
* HDMI encoder.
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 44a5a36806e3..6a49e7a0ab84 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -30,6 +30,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3dfd8b34dceb..365cf337529f 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -44,6 +44,9 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
+static struct dentry *accel_debugfs_root;
+static struct dentry *drm_debugfs_root;
+
/***************************************************
* Initialization, etc.
**************************************************/
@@ -77,14 +80,15 @@ static int drm_clients_info(struct seq_file *m, void *data)
kuid_t uid;
seq_printf(m,
- "%20s %5s %3s master a %5s %10s %*s\n",
+ "%20s %5s %3s master a %5s %10s %*s %20s\n",
"command",
"tgid",
"dev",
"uid",
"magic",
DRM_CLIENT_NAME_MAX_LEN,
- "name");
+ "name",
+ "id");
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwards.
@@ -100,7 +104,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
pid = rcu_dereference(priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n",
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s %20llu\n",
task ? task->comm : "<unknown>",
pid_vnr(pid),
priv->minor->index,
@@ -109,7 +113,8 @@ static int drm_clients_info(struct seq_file *m, void *data)
from_kuid_munged(seq_user_ns(m), uid),
priv->magic,
DRM_CLIENT_NAME_MAX_LEN,
- priv->client_name ? priv->client_name : "<unset>");
+ priv->client_name ? priv->client_name : "<unset>",
+ priv->client_id);
rcu_read_unlock();
mutex_unlock(&priv->client_name_lock);
}
@@ -285,16 +290,120 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
+void drm_debugfs_bridge_params(void)
+{
+ drm_bridge_debugfs_params(drm_debugfs_root);
+}
+
+void drm_debugfs_init_root(void)
+{
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ accel_debugfs_root = debugfs_create_dir("accel", NULL);
+#endif
+}
+
+void drm_debugfs_remove_root(void)
+{
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ debugfs_remove(accel_debugfs_root);
+#endif
+ debugfs_remove(drm_debugfs_root);
+}
+
+static int drm_debugfs_proc_info_show(struct seq_file *m, void *unused)
+{
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_file *file = m->private;
+
+ if (!file)
+ return -EINVAL;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ seq_printf(m, "pid: %d\n", task ? task->pid : 0);
+ seq_printf(m, "comm: %s\n", task ? task->comm : "Unset");
+ rcu_read_unlock();
+ return 0;
+}
+
+static int drm_debugfs_proc_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, drm_debugfs_proc_info_show, inode->i_private);
+}
+
+static const struct file_operations drm_debugfs_proc_info_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_proc_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * drm_debugfs_clients_add - Add a per client debugfs directory
+ * @file: drm_file for a client
+ *
+ * Create the debugfs directory for each client. This will be used to populate
+ * driver specific data for each client.
+ *
+ * Also add the process information debugfs file for each client to tag
+ * which client belongs to which process.
+ */
+void drm_debugfs_clients_add(struct drm_file *file)
+{
+ char *client;
+
+ client = kasprintf(GFP_KERNEL, "client-%llu", file->client_id);
+ if (!client)
+ return;
+
+ /* Create a debugfs directory for the client in root on drm debugfs */
+ file->debugfs_client = debugfs_create_dir(client, drm_debugfs_root);
+ kfree(client);
+
+ debugfs_create_file("proc_info", 0444, file->debugfs_client, file,
+ &drm_debugfs_proc_info_fops);
+
+ client = kasprintf(GFP_KERNEL, "../%s", file->minor->dev->unique);
+ if (!client)
+ return;
+
+ /* Create a link from client_id to the drm device this client id belongs to */
+ debugfs_create_symlink("device", file->debugfs_client, client);
+ kfree(client);
+}
+
+/**
+ * drm_debugfs_clients_remove - removes all debugfs directories and files
+ * @file: drm_file for a client
+ *
+ * Removes the debugfs directories recursively from the client directory.
+ *
+ * There is also a possibility that debugfs files are open while the drm_file
+ * is released.
+ */
+void drm_debugfs_clients_remove(struct drm_file *file)
+{
+ debugfs_remove_recursive(file->debugfs_client);
+ file->debugfs_client = NULL;
+}
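Drivers can then hang their own per-client files off file->debugfs_client, which exists by the time the driver's open hook runs. A hedged sketch (foo_client_info_fops is hypothetical):

static int foo_open(struct drm_device *dev, struct drm_file *file)
{
        debugfs_create_file("foo_info", 0444, file->debugfs_client,
                            file->driver_priv, &foo_client_info_fops);
        return 0;
}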
+
/**
* drm_debugfs_dev_init - create debugfs directory for the device
* @dev: the device which we want to create the directory for
- * @root: the parent directory depending on the device type
*
* Creates the debugfs directory for the device under the DRM or accel
* debugfs root, depending on the device type.
*/
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+void drm_debugfs_dev_init(struct drm_device *dev)
{
- dev->debugfs_root = debugfs_create_dir(dev->unique, root);
+ if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ dev->debugfs_root = debugfs_create_dir(dev->unique, accel_debugfs_root);
+ else
+ dev->debugfs_root = debugfs_create_dir(dev->unique, drm_debugfs_root);
}
/**
@@ -321,14 +430,13 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_atomic_debugfs_init(dev);
}
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
struct drm_device *dev = minor->dev;
char name[64];
sprintf(name, "%d", minor_id);
- minor->debugfs_symlink = debugfs_create_symlink(name, root,
+ minor->debugfs_symlink = debugfs_create_symlink(name, drm_debugfs_root,
dev->unique);
/* TODO: Only for compatibility with drivers */
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index bbc3bc4ba844..6b43b1cf2327 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -29,6 +29,7 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 56dd61f8e05a..cdd591b11488 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -29,11 +29,13 @@
#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sprintf.h>
#include <linux/srcu.h>
@@ -70,8 +72,6 @@ DEFINE_XARRAY_ALLOC(drm_minors_xa);
*/
static bool drm_core_init_complete;
-static struct dentry *drm_debugfs_root;
-
DEFINE_STATIC_SRCU(drm_unplug_srcu);
/*
@@ -184,8 +184,7 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
return 0;
if (minor->type != DRM_MINOR_ACCEL) {
- ret = drm_debugfs_register(minor, minor->index,
- drm_debugfs_root);
+ ret = drm_debugfs_register(minor, minor->index);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_debugfs;
@@ -538,10 +537,15 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
}
}
+#define WEDGE_STR_LEN 32
+#define PID_STR_LEN 15
+#define COMM_STR_LEN (TASK_COMM_LEN + 5)
+
/**
* drm_dev_wedged_event - generate a device wedged uevent
* @dev: DRM device
* @method: method(s) to be used for recovery
+ * @info: optional information about the guilty task
*
* This generates a device wedged uevent for the DRM device specified by @dev.
* Recovery @method\(s) of choice will be sent in the uevent environment as
@@ -554,13 +558,13 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
*
* Returns: 0 on success, negative error code otherwise.
*/
-int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info)
{
+ char event_string[WEDGE_STR_LEN], pid_string[PID_STR_LEN], comm_string[COMM_STR_LEN];
+ char *envp[] = { event_string, NULL, NULL, NULL };
const char *recovery = NULL;
unsigned int len, opt;
- /* Event string length up to 28+ characters with available methods */
- char event_string[32];
- char *envp[] = { event_string, NULL };
len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
@@ -582,6 +586,13 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
"but recovered through reset" : "needs recovery");
+ if (info && (info->comm[0] != '\0') && (info->pid >= 0)) {
+ snprintf(pid_string, sizeof(pid_string), "PID=%u", info->pid);
+ snprintf(comm_string, sizeof(comm_string), "TASK=%s", info->comm);
+ envp[1] = pid_string;
+ envp[2] = comm_string;
+ }
+
return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_dev_wedged_event);
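Callers pass the optional task info when they can attribute the hang. A hedged caller sketch, where guilty_pid and guilty_comm stand for driver bookkeeping that is not part of this patch:

struct drm_wedge_task_info info = {
        .pid = guilty_pid,
};

strscpy(info.comm, guilty_comm, sizeof(info.comm));

drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_REBIND |
                          DRM_WEDGE_RECOVERY_BUS_RESET, &info);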
@@ -773,10 +784,7 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
- accel_debugfs_init(dev);
- else
- drm_debugfs_dev_init(dev, drm_debugfs_root);
+ drm_debugfs_dev_init(dev);
return 0;
@@ -1216,7 +1224,7 @@ static void drm_core_exit(void)
drm_panic_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
- debugfs_remove(drm_debugfs_root);
+ drm_debugfs_remove_root();
drm_sysfs_destroy();
WARN_ON(!xa_empty(&drm_minors_xa));
drm_connector_ida_destroy();
@@ -1235,8 +1243,8 @@ static int __init drm_core_init(void)
goto error;
}
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- drm_bridge_debugfs_params(drm_debugfs_root);
+ drm_debugfs_init_root();
+ drm_debugfs_bridge_params();
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 74e77742b2bd..e2e85345aa9a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -31,6 +31,7 @@
#include <linux/bitfield.h>
#include <linux/byteorder/generic.h>
#include <linux/cec.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -66,34 +67,36 @@ static int oui(u8 first, u8 second, u8 third)
* on as many displays as possible).
*/
-/* First detailed mode wrong, use largest 60Hz mode */
-#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
-/* Reported 135MHz pixel clock is too high, needs adjustment */
-#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
-/* Prefer the largest mode at 75 Hz */
-#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
-/* Detail timing is in cm not mm */
-#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* use +hsync +vsync for detailed mode */
-#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* Force reduced-blanking timings for detailed modes */
-#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
-/* Force 8bpc */
-#define EDID_QUIRK_FORCE_8BPC (1 << 8)
-/* Force 12bpc */
-#define EDID_QUIRK_FORCE_12BPC (1 << 9)
-/* Force 6bpc */
-#define EDID_QUIRK_FORCE_6BPC (1 << 10)
-/* Force 10bpc */
-#define EDID_QUIRK_FORCE_10BPC (1 << 11)
-/* Non desktop display (i.e. HMD) */
-#define EDID_QUIRK_NON_DESKTOP (1 << 12)
-/* Cap the DSC target bitrate to 15bpp */
-#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13)
+enum drm_edid_internal_quirk {
+ /* First detailed mode wrong, use largest 60Hz mode */
+ EDID_QUIRK_PREFER_LARGE_60 = DRM_EDID_QUIRK_NUM,
+ /* Reported 135MHz pixel clock is too high, needs adjustment */
+ EDID_QUIRK_135_CLOCK_TOO_HIGH,
+ /* Prefer the largest mode at 75 Hz */
+ EDID_QUIRK_PREFER_LARGE_75,
+ /* Detail timing is in cm not mm */
+ EDID_QUIRK_DETAILED_IN_CM,
+ /* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+ EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE,
+ /* use +hsync +vsync for detailed mode */
+ EDID_QUIRK_DETAILED_SYNC_PP,
+ /* Force reduced-blanking timings for detailed modes */
+ EDID_QUIRK_FORCE_REDUCED_BLANKING,
+ /* Force 8bpc */
+ EDID_QUIRK_FORCE_8BPC,
+ /* Force 12bpc */
+ EDID_QUIRK_FORCE_12BPC,
+ /* Force 6bpc */
+ EDID_QUIRK_FORCE_6BPC,
+ /* Force 10bpc */
+ EDID_QUIRK_FORCE_10BPC,
+ /* Non desktop display (i.e. HMD) */
+ EDID_QUIRK_NON_DESKTOP,
+ /* Cap the DSC target bitrate to 15bpp */
+ EDID_QUIRK_CAP_DSC_15BPP,
+};
#define MICROSOFT_IEEE_OUI 0xca125c
@@ -128,124 +131,132 @@ static const struct edid_quirk {
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'C', 'R', 44358, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Acer F51 */
- EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('A', 'P', 'I', 0x7602, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('A', 'E', 'O', 0, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BenQ GW2765 */
- EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, BIT(EDID_QUIRK_FORCE_8BPC)),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x78b, BIT(EDID_QUIRK_FORCE_6BPC)),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('C', 'P', 'T', 0x17df, BIT(EDID_QUIRK_FORCE_6BPC)),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 0x3652, BIT(EDID_QUIRK_FORCE_6BPC)),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('B', 'O', 'E', 0x0771, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Belinea 10 15 55 */
- EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 1516, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Envision Peripherals, Inc. EN-7100e */
- EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
+ EDID_QUIRK('E', 'P', 'I', 59264, BIT(EDID_QUIRK_135_CLOCK_TOO_HIGH)),
/* Envision EN2028 */
- EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('E', 'P', 'I', 8232, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Funai Electronics PM36B */
- EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM),
+ EDID_QUIRK('F', 'C', 'M', 13600, BIT(EDID_QUIRK_PREFER_LARGE_75) |
+ BIT(EDID_QUIRK_DETAILED_IN_CM)),
/* LG 27GP950 */
- EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5bbf, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LG 27GN950 */
- EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
+ EDID_QUIRK('G', 'S', 'M', 0x5b9a, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
+ EDID_QUIRK('L', 'G', 'D', 764, BIT(EDID_QUIRK_FORCE_10BPC)),
/* LG Philips LCD LP154W01-A5 */
- EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
- EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
/* Samsung SyncMaster 205BW. Note: irony */
- EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
+ EDID_QUIRK('S', 'A', 'M', 541, BIT(EDID_QUIRK_DETAILED_SYNC_PP)),
/* Samsung SyncMaster 22[5-6]BW */
- EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
- EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 596, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+ EDID_QUIRK('S', 'A', 'M', 638, BIT(EDID_QUIRK_PREFER_LARGE_60)),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, BIT(EDID_QUIRK_FORCE_12BPC)),
/* ViewSonic VA2026w */
- EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
+ EDID_QUIRK('V', 'S', 'C', 5020, BIT(EDID_QUIRK_FORCE_REDUCED_BLANKING)),
/* Medion MD 30217 PG */
- EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, BIT(EDID_QUIRK_PREFER_LARGE_75)),
/* Lenovo G50 */
- EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
+ EDID_QUIRK('S', 'D', 'C', 18514, BIT(EDID_QUIRK_FORCE_6BPC)),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('S', 'E', 'C', 0xd033, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
+ EDID_QUIRK('E', 'T', 'R', 13896, BIT(EDID_QUIRK_FORCE_8BPC)),
/* Valve Index Headset */
- EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, BIT(EDID_QUIRK_NON_DESKTOP)),
/* HTC Vive and Vive Pro VR Headsets */
- EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0001, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Windows Mixed Reality Headsets */
- EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sony PlayStation VR Headset */
- EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, BIT(EDID_QUIRK_NON_DESKTOP)),
/* Sensics VR Headsets */
- EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'N', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
/* OSVR HDK and HDK2 VR Headsets */
- EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
+ EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
+
+ /*
+ * @drm_edid_internal_quirk entries end here, followed by the
+ * @drm_edid_quirk entries.
+ */
+
+ /* HP ZR24w DP AUX DPCD access requires probing to prevent corruption. */
+ EDID_QUIRK('H', 'W', 'P', 0x2869, BIT(DRM_EDID_QUIRK_DP_DPCD_PROBE)),
};
/*
@@ -2951,6 +2962,18 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
return 0;
}
+static bool drm_edid_has_internal_quirk(struct drm_connector *connector,
+ enum drm_edid_internal_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk)
+{
+ return connector->display_info.quirks & BIT(quirk);
+}
+EXPORT_SYMBOL(drm_edid_has_quirk);
+
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
@@ -2960,7 +2983,6 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
*/
static void edid_fixup_preferred(struct drm_connector *connector)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
@@ -2968,9 +2990,9 @@ static void edid_fixup_preferred(struct drm_connector *connector)
if (list_empty(&connector->probed_modes))
return;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60))
target_refresh = 60;
- if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
@@ -3474,7 +3496,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
- const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
@@ -3508,7 +3529,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
return NULL;
}
- if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_REDUCED_BLANKING)) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
@@ -3520,7 +3541,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (!mode)
return NULL;
- if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_135_CLOCK_TOO_HIGH))
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
@@ -3551,7 +3572,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
drm_mode_do_interlace_quirk(mode, pt);
- if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_SYNC_PP)) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
@@ -3564,12 +3585,12 @@ set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_IN_CM)) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
- if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
@@ -5373,7 +5394,8 @@ static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
static void drm_calculate_luminance_range(struct drm_connector *connector)
{
- struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
+ const struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
struct drm_luminance_range_info *luminance_range =
&connector->display_info.luminance_range;
static const u8 pre_computed_values[] = {
@@ -5434,21 +5456,21 @@ static uint8_t hdr_metadata_type(const u8 *edid_ext)
static void
drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
{
+ struct hdr_static_metadata *hdr_metadata =
+ &connector->display_info.hdr_sink_metadata.hdmi_type1;
u16 len;
len = cea_db_payload_len(db);
- connector->hdr_sink_metadata.hdmi_type1.eotf =
- eotf_supported(db);
- connector->hdr_sink_metadata.hdmi_type1.metadata_type =
- hdr_metadata_type(db);
+ hdr_metadata->eotf = eotf_supported(db);
+ hdr_metadata->metadata_type = hdr_metadata_type(db);
if (len >= 4)
- connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
+ hdr_metadata->max_cll = db[4];
if (len >= 5)
- connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
+ hdr_metadata->max_fall = db[5];
if (len >= 6) {
- connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+ hdr_metadata->min_cll = db[6];
/* Calculate only when all values are available */
drm_calculate_luminance_range(connector);
@@ -6596,7 +6618,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
- memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
+ memset(&info->hdr_sink_metadata, 0, sizeof(info->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
@@ -6734,26 +6756,26 @@ static void update_display_info(struct drm_connector *connector,
drm_update_mso(connector, drm_edid);
out:
- if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_NON_DESKTOP)) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
- if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_CAP_DSC_15BPP))
info->max_dsc_bpp = 15;
- if (info->quirks & EDID_QUIRK_FORCE_6BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_6BPC))
info->bpc = 6;
- if (info->quirks & EDID_QUIRK_FORCE_8BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_8BPC))
info->bpc = 8;
- if (info->quirks & EDID_QUIRK_FORCE_10BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_10BPC))
info->bpc = 10;
- if (info->quirks & EDID_QUIRK_FORCE_12BPC)
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_12BPC))
info->bpc = 12;
/* Depends on info->cea_rev set by drm_parse_cea_ext() above */
@@ -6918,7 +6940,6 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
static int _drm_edid_connector_add_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
if (!drm_edid)
@@ -6948,7 +6969,8 @@ static int _drm_edid_connector_add_modes(struct drm_connector *connector,
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
- if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60) ||
+ drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector);
return num_modes;
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 18e366cc4993..8d0601400182 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -2,7 +2,9 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
+
#include <linux/dma-resv.h>
+#include <linux/export.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 2c4dc7ebc0c3..fd71969d2fb1 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -17,7 +17,9 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/module.h>
/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 937c3939e502..11a5b60cb9ce 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sysrq.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 02a516e77192..8bd626ef16c7 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index f824369baacd..1e827bf8b815 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 73d35d59590c..85feb55bba11 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 246cf845e2c9..eebd1a05ee97 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -33,6 +33,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,6 +46,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -167,6 +169,9 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
drm_prime_init_file_private(&file->prime);
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_add(file);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
if (ret < 0)
@@ -181,6 +186,10 @@ out_prime_destroy:
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
+
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
put_pid(rcu_access_pointer(file->pid));
kfree(file);
@@ -235,6 +244,9 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
+ if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
+ drm_debugfs_clients_remove(file);
+
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -1017,8 +1029,10 @@ void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
pid = rcu_dereference(file_priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
- drm_err(dev, "comm: %s pid: %d client: %s ... %pV", task ? task->comm : "Unset",
- task ? task->pid : 0, file_priv->client_name ?: "Unset", &vaf);
+ drm_err(dev, "comm: %s pid: %d client-id:%llu client: %s ... %pV",
+ task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_id,
+ file_priv->client_name ?: "Unset", &vaf);
va_end(args);
rcu_read_unlock();
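With the client-id added to the format string, drm_file_err() output now identifies the offending client unambiguously even when several processes share a comm name. Call sites need no change; a hypothetical timeout handler would still just do:

	/* Hypothetical caller; only the emitted log line changed. */
	drm_file_err(file_priv, "job timed out on ring %s\n", ring->name);

and the message now carries "client-id:<n>" between the pid and the client name.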
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 8c6090a90d56..f5889dd8e7aa 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -21,6 +21,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_flip_work.h>
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index d36e6cacc575..8f3daf38ca63 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,6 +8,7 @@
* (at your option) any later version.
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -558,18 +559,6 @@ static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigne
drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
}
-static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix)
-{
- return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
-}
-
-/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
-static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
- unsigned int pixels)
-{
- drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab);
-}
-
/**
* drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
* @dst: Array of RGB565 destination buffers
@@ -579,7 +568,6 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @state: Transform and conversion state
- * @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -594,23 +582,56 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
- if (swab)
- xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
- else
- xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
+static void drm_fb_xrgb8888_to_rgb565be_line(void *dbuf, const void *sbuf,
+ unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565be);
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb565be - Convert XRGB8888 to RGB565|DRM_FORMAT_BIG_ENDIAN clip buffer
+ * @dst: Array of RGB565BE destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB565BE devices that don't support XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_rgb565be_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565be);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -857,11 +878,33 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
+ * @dst: Array of ABGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -870,17 +913,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
+ * @dst: Array of XBGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -889,6 +955,49 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
+
+static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
+ * @dst: Array of BGRX8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgrx8888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -1099,7 +1208,7 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
return 0;
} else if (fb_format == DRM_FORMAT_XRGB8888) {
if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB1555) {
drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
@@ -1250,141 +1359,3 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
-
-static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
-{
- /* only handle formats with depth != 0 and alpha channel */
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- return DRM_FORMAT_XRGB1555;
- case DRM_FORMAT_ABGR1555:
- return DRM_FORMAT_XBGR1555;
- case DRM_FORMAT_RGBA5551:
- return DRM_FORMAT_RGBX5551;
- case DRM_FORMAT_BGRA5551:
- return DRM_FORMAT_BGRX5551;
- case DRM_FORMAT_ARGB8888:
- return DRM_FORMAT_XRGB8888;
- case DRM_FORMAT_ABGR8888:
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_RGBA8888:
- return DRM_FORMAT_RGBX8888;
- case DRM_FORMAT_BGRA8888:
- return DRM_FORMAT_BGRX8888;
- case DRM_FORMAT_ARGB2101010:
- return DRM_FORMAT_XRGB2101010;
- case DRM_FORMAT_ABGR2101010:
- return DRM_FORMAT_XBGR2101010;
- case DRM_FORMAT_RGBA1010102:
- return DRM_FORMAT_RGBX1010102;
- case DRM_FORMAT_BGRA1010102:
- return DRM_FORMAT_BGRX1010102;
- }
-
- return fourcc;
-}
-
-static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
-{
- const uint32_t *fourccs_end = fourccs + nfourccs;
-
- while (fourccs < fourccs_end) {
- if (*fourccs == fourcc)
- return true;
- ++fourccs;
- }
- return false;
-}
-
-/**
- * drm_fb_build_fourcc_list - Filters a list of supported color formats against
- * the device's native formats
- * @dev: DRM device
- * @native_fourccs: 4CC codes of natively supported color formats
- * @native_nfourccs: The number of entries in @native_fourccs
- * @fourccs_out: Returns 4CC codes of supported color formats
- * @nfourccs_out: The number of available entries in @fourccs_out
- *
- * This function create a list of supported color format from natively
- * supported formats and additional emulated formats.
- * At a minimum, most userspace programs expect at least support for
- * XRGB8888 on the primary plane. Devices that have to emulate the
- * format, and possibly others, can use drm_fb_build_fourcc_list() to
- * create a list of supported color formats. The returned list can
- * be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Native formats with alpha channel
- * will be replaced by such without, as primary planes usually don't
- * support alpha. Other heuristics might be applied
- * to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list.
- *
- * Returns:
- * The number of color-formats 4CC codes returned in @fourccs_out.
- */
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out)
-{
- /*
- * XRGB8888 is the default fallback format for most of userspace
- * and it's currently the only format that should be emulated for
- * the primary plane. Only if there's ever another default fallback,
- * it should be added here.
- */
- static const uint32_t extra_fourccs[] = {
- DRM_FORMAT_XRGB8888,
- };
- static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
-
- u32 *fourccs = fourccs_out;
- const u32 *fourccs_end = fourccs_out + nfourccs_out;
- size_t i;
-
- /*
- * The device's native formats go first.
- */
-
- for (i = 0; i < native_nfourccs; ++i) {
- /*
- * Several DTs, boot loaders and firmware report native
- * alpha formats that are non-alpha formats instead. So
- * replace alpha formats by non-alpha formats.
- */
- u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- /*
- * The extra formats, emulated by the driver, go second.
- */
-
- for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = extra_fourccs[i];
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate and native entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- return fourccs - fourccs_out;
-}
-EXPORT_SYMBOL(drm_fb_build_fourcc_list);
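With the boolean swab parameter gone, callers pick the byte order through the function name rather than a flag. A hedged sketch of a damage-update path using the two helpers from this file (my_update_rect and dst_is_be are hypothetical):

static void my_update_rect(struct iosys_map *dst, const unsigned int *dst_pitch,
			   const struct iosys_map *src,
			   const struct drm_framebuffer *fb,
			   const struct drm_rect *damage,
			   struct drm_format_conv_state *state, bool dst_is_be)
{
	if (dst_is_be)
		drm_fb_xrgb8888_to_rgb565be(dst, dst_pitch, src, fb, damage, state);
	else
		drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, damage, state);
}

This matches drm_fb_blit(), which already dispatches on the destination fourcc and now calls the little-endian variant without the extra argument.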
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
index 9f857bfa368d..ce29dd05bcc5 100644
--- a/drivers/gpu/drm/drm_format_internal.h
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -5,6 +5,7 @@
#include <linux/bits.h>
#include <linux/types.h>
+#include <linux/swab.h>
/*
* Each pixel-format conversion helper takes a raw pixel in a
@@ -42,7 +43,7 @@ static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
u32 b = pix & 0x000000ff;
/* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- return (3 * r + 6 * g + b) / 10;
+ return (77 * r + 150 * g + 29 * b) / 256;
}
static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
@@ -59,6 +60,11 @@ static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
((pix & 0x000000f8) >> 3);
}
+static inline u32 drm_pixel_xrgb8888_to_rgb565be(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
+}
+
static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
{
return ((pix & 0x00f80000) >> 8) |
@@ -111,6 +117,14 @@ static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
((pix & 0x000000ff) << 16);
}
+static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
+{
+ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 8) |
+ ((pix & 0x0000ff00) << 8) |
+ ((pix & 0x000000ff) << 24);
+}
+
static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
{
return GENMASK(31, 24) | /* fill alpha bits */
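The new luma coefficients keep the ITU-R BT.601 weights but replace the divide-by-ten approximation with a divide by 256, which compilers reduce to a shift; since 77 + 150 + 29 = 256, white still maps to full scale. A standalone sanity check for the luma and RGB565/RGB565BE math above, written as plain userspace C rather than kernel code:

#include <stdint.h>
#include <stdio.h>

static uint32_t xrgb8888_to_rgb565(uint32_t pix)
{
	return ((pix & 0x00f80000) >> 8) |
	       ((pix & 0x0000fc00) >> 5) |
	       ((pix & 0x000000f8) >> 3);
}

static uint32_t xrgb8888_to_r8_bt601(uint32_t pix)
{
	uint32_t r = (pix & 0x00ff0000) >> 16;
	uint32_t g = (pix & 0x0000ff00) >> 8;
	uint32_t b = pix & 0x000000ff;

	return (77 * r + 150 * g + 29 * b) / 256;
}

static uint16_t swab16_local(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
	/* White: 77 + 150 + 29 = 256, so the luma is exactly 255. */
	printf("Y(white)   = %u\n", xrgb8888_to_r8_bt601(0x00ffffff));
	/* Pure red packs to 0xf800 little-endian, 0x00f8 byte-swapped. */
	printf("565(red)   = 0x%04x\n", xrgb8888_to_rgb565(0x00ff0000));
	printf("565be(red) = 0x%04x\n",
	       swab16_local((uint16_t)xrgb8888_to_rgb565(0x00ff0000)));
	return 0;
}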
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 3a94ca211f9c..e0d533611040 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -238,6 +238,14 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGB161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
+ { .format = DRM_FORMAT_BGR161616, .depth = 0,
+ .num_planes = 1, .char_per_block = { 6, 0, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = false },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -346,6 +354,33 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
.char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S010, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S210, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S410, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S012, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S212, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S412, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S016, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S216, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S416, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
};
unsigned int i;
@@ -382,7 +417,8 @@ EXPORT_SYMBOL(drm_format_info);
/**
* drm_get_format_info - query information for a given framebuffer configuration
* @dev: DRM device
- * @mode_cmd: metadata from the userspace fb creation request
+ * @pixel_format: pixel format (DRM_FORMAT_*)
+ * @modifier: modifier
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
@@ -390,15 +426,16 @@ EXPORT_SYMBOL(drm_format_info);
*/
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+ u32 pixel_format, u64 modifier)
{
const struct drm_format_info *info = NULL;
if (dev->mode_config.funcs->get_format_info)
- info = dev->mode_config.funcs->get_format_info(mode_cmd);
+ info = dev->mode_config.funcs->get_format_info(pixel_format,
+ modifier);
if (!info)
- info = drm_format_info(mode_cmd->pixel_format);
+ info = drm_format_info(pixel_format);
return info;
}
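Two practical consequences of this hunk: format-info lookups now pass the pixel format and modifier explicitly, and the new 48-bit RGB formats describe their layout via char_per_block instead of cpp, so size math must go through the block helpers. A hedged sketch (the width value is arbitrary):

	const struct drm_format_info *info;
	unsigned int width = 1024;
	u64 pitch;

	info = drm_get_format_info(dev, mode_cmd->pixel_format,
				   mode_cmd->modifier[0]);

	/* DRM_FORMAT_RGB161616: one 1x1 block per pixel, 6 bytes per block */
	pitch = drm_format_info_min_pitch(info, 0, width); /* 1024 * 6 = 6144 */

drm_format_info_min_pitch() already derives pitches from the block parameters, so drivers that use it pick up the new formats without further changes.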
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 63a70f285cce..adbb73f00d68 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -153,18 +153,11 @@ int drm_mode_addfb_ioctl(struct drm_device *dev,
}
static int framebuffer_check(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *r)
{
- const struct drm_format_info *info;
int i;
- /* check if the format is supported at all */
- if (!__drm_format_info(r->pixel_format)) {
- drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
- &r->pixel_format);
- return -EINVAL;
- }
-
if (r->width == 0) {
drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
@@ -175,9 +168,6 @@ static int framebuffer_check(struct drm_device *dev,
return -EINVAL;
}
- /* now let the driver pick its own format info */
- info = drm_get_format_info(dev, r);
-
for (i = 0; i < info->num_planes; i++) {
unsigned int width = drm_format_info_plane_width(info, r->width, i);
unsigned int height = drm_format_info_plane_height(info, r->height, i);
@@ -272,6 +262,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
+ const struct drm_format_info *info;
struct drm_framebuffer *fb;
int ret;
@@ -297,11 +288,21 @@ drm_internal_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- ret = framebuffer_check(dev, r);
+ /* check if the format is supported at all */
+ if (!__drm_format_info(r->pixel_format)) {
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* now let the driver pick its own format info */
+ info = drm_get_format_info(dev, r->pixel_format, r->modifier[0]);
+
+ ret = framebuffer_check(dev, info, r);
if (ret)
return ERR_PTR(ret);
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, info, r);
if (IS_ERR(fb)) {
drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
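Since the core resolves and validates the format info once before calling into the driver, a &drm_mode_config_funcs.fb_create implementation gains the pre-looked-up info as a parameter. A minimal sketch of the updated callback shape (my_fb_create and the plane limit are hypothetical):

static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file,
	     const struct drm_format_info *info,
	     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* info was already resolved by drm_internal_framebuffer_create();
	 * no drm_get_format_info() call is needed here anymore.
	 */
	if (info->num_planes != 1)
		return ERR_PTR(-EINVAL); /* hypothetical single-plane limit */

	return drm_gem_fb_create(dev, file, info, mode_cmd);
}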
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ac0524595bd6..6a44351e58b7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
@@ -1238,38 +1239,6 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->pin)
- return obj->funcs->pin(obj);
-
- return 0;
-}
-
-void drm_gem_unpin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->unpin)
- obj->funcs->unpin(obj);
-}
-
-int drm_gem_pin(struct drm_gem_object *obj)
-{
- int ret;
-
- dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_pin_locked(obj);
- dma_resv_unlock(obj->resv);
-
- return ret;
-}
-
-void drm_gem_unpin(struct drm_gem_object *obj)
-{
- dma_resv_lock(obj->resv, NULL);
- drm_gem_unpin_locked(obj);
- dma_resv_unlock(obj->resv);
-}
-
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1514,12 +1483,14 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
* @nr_to_scan: The number of pages to try to reclaim
* @remaining: The number of pages left to reclaim, should be initialized by caller
* @shrink: Callback to try to shrink/reclaim the object.
+ * @ticket: Optional ww_acquire_ctx context to use for locking
*/
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned int nr_to_scan,
unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj))
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket)
{
struct drm_gem_lru still_in_lru;
struct drm_gem_object *obj;
@@ -1552,17 +1523,20 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
*/
mutex_unlock(lru->lock);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
+
/*
* Note that this still needs to be trylock, since we can
* hit shrinker in response to trying to get backing pages
* for this obj (ie. while it's lock is already held)
*/
- if (!dma_resv_trylock(obj->resv)) {
+ if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
*remaining += obj->size >> PAGE_SHIFT;
goto tail;
}
- if (shrink(obj)) {
+ if (shrink(obj, ticket)) {
freed += obj->size >> PAGE_SHIFT;
/*
@@ -1576,6 +1550,9 @@ drm_gem_lru_scan(struct drm_gem_lru *lru,
dma_resv_unlock(obj->resv);
+ if (ticket)
+ ww_acquire_fini(ticket);
+
tail:
drm_gem_object_put(obj);
mutex_lock(lru->lock);
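The scan loop now threads an optional ww_acquire_ctx through to the shrink callback, so a driver can lock further buffers against the same ticket while reclaiming. A hedged sketch of a caller (all my_* names are hypothetical):

static bool my_gem_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	/* obj->resv is already trylocked against @ticket by drm_gem_lru_scan() */
	if (!my_gem_is_purgeable(obj))
		return false;

	my_gem_purge(obj);
	return true;
}

static unsigned long my_shrinker_scan(struct my_gpu *gpu, unsigned int nr_to_scan)
{
	struct ww_acquire_ctx ticket;
	unsigned long remaining = 0;

	return drm_gem_lru_scan(&gpu->lru, nr_to_scan, &remaining,
				my_gem_shrink, &ticket);
}

Note that drm_gem_lru_scan() performs ww_acquire_init()/ww_acquire_fini() on the ticket itself, per this hunk, so the caller only provides the storage.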
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 93337543aac3..ebf305fb24f0 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -2,6 +2,7 @@
#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
+#include <linux/export.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 6ff22e04029e..4bc89d33df59 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -67,6 +68,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
static int
drm_gem_fb_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
@@ -74,7 +76,7 @@ drm_gem_fb_init(struct drm_device *dev,
unsigned int i;
int ret;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
@@ -135,6 +137,7 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* @dev: DRM device
* @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -151,20 +154,14 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES];
unsigned int i;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- drm_dbg_kms(dev, "Failed to get FB format info\n");
- return -EINVAL;
- }
-
if (drm_drv_uses_atomic_modeset(dev) &&
!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -199,7 +196,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
}
}
- ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ ret = drm_gem_fb_init(dev, fb, info, mode_cmd, objs, i, funcs);
if (ret)
goto err_gem_object_put;
@@ -220,6 +217,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
* callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
@@ -232,6 +230,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
@@ -242,7 +241,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, info, mode_cmd, funcs);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -262,6 +261,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -281,9 +281,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
@@ -299,6 +300,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
* &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
*
* This function creates a new framebuffer object described by
@@ -319,9 +321,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = {
*/
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd,
&drm_gem_fb_funcs_dirtyfb);
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
@@ -501,12 +504,9 @@ EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
// TODO Drop this function and replace by drm_format_info_bpp() once all
// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
-
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
@@ -520,6 +520,7 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
@@ -560,7 +561,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
afbc_fb->offset = mode_cmd->offsets[0];
- bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ bpp = drm_gem_afbc_get_bpp(dev, info, mode_cmd);
if (!bpp) {
drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
return -EINVAL;
@@ -582,6 +583,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
*
* @dev: DRM device
* @afbc_fb: afbc-specific framebuffer
+ * @info: pixel format information
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @afbc_fb: afbc framebuffer
*
@@ -595,24 +597,24 @@ static int drm_gem_afbc_min_size(struct drm_device *dev,
* Zero on success or a negative error value on failure.
*/
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb)
{
- const struct drm_format_info *info;
struct drm_gem_object **objs;
int ret;
objs = afbc_fb->base.obj;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return -EINVAL;
- ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_afbc_min_size(dev, info, mode_cmd, afbc_fb);
if (ret < 0)
return ret;
- if (objs[0]->size < afbc_fb->afbc_size)
+ if (objs[0]->size < afbc_fb->afbc_size) {
+ drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum afbc size (%u)\n",
+ objs[0]->size, afbc_fb->afbc_size);
return -EINVAL;
+ }
return 0;
}
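For AFBC users the plumbing is the same: the format info arrives through the fb_create callback and is forwarded down, instead of being looked up again in each helper. A hedged sketch of an AFBC-aware callback (my_* names are hypothetical, and error unwinding is simplified):

static struct drm_framebuffer *
my_afbc_fb_create(struct drm_device *dev, struct drm_file *file,
		  const struct drm_format_info *info,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_afbc_framebuffer *afbc_fb;
	int ret;

	afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
	if (!afbc_fb)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, info,
					 mode_cmd, &my_fb_funcs);
	if (ret) {
		kfree(afbc_fb);
		return ERR_PTR(ret);
	}

	ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb);
	if (ret) {
		drm_framebuffer_put(&afbc_fb->base);
		return ERR_PTR(ret);
	}

	return &afbc_fb->base;
}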
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a5dbee6974ab..5d1349c34afd 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -348,6 +348,8 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
} else {
@@ -408,6 +410,8 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
@@ -800,6 +804,63 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also want to import a dmabuf without
+ * mapping its sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ size_t size;
+ int ret;
+
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+	 * Importing a dmabuf that was exported from our own GEM object takes a
+	 * reference on the GEM object itself instead of on the dmabuf's f_count.
+ */
+ obj = dma_buf->priv;
+ drm_gem_object_get(obj);
+ return obj;
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto fail_detach;
+ }
+
+ drm_dbg_prime(dev, "size = %zu\n", size);
+
+ shmem->base.import_attach = attach;
+ shmem->base.resv = dma_buf->resv;
+
+ return &shmem->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
+
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
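Wiring the new import helper into a driver is a single initializer, exactly as the kerneldoc above suggests (my_driver is hypothetical):

static const struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM,
	/* import dma-bufs without eagerly mapping their sg_table */
	.gem_prime_import	= drm_gem_shmem_prime_import_no_map,
	/* other callbacks omitted in this sketch */
};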
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 3734aa2d1c5b..257cca4cb97a 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 22b1fe9c03b8..b04cde4a60e7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -88,11 +89,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
* drmm_vram_helper_init() is a managed interface that installs a
* clean-up handler to run during the DRM device's release.
*
- * For drawing or scanout operations, rsp. buffer objects have to be pinned
- * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
* it's used to program the hardware's scanout engine for framebuffers, set
@@ -299,30 +295,7 @@ out:
return 0;
}
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Small buffer objects, such as cursor images, can lead to memory
- * fragmentation if they are pinned in the middle of video RAM. This
- * is especially a problem on devices with only a small amount of
- * video RAM. Fragmentation can prevent the primary framebuffer from
- * fitting in, even though there's enough memory overall. The modifier
- * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
- * at the high end of the memory region to avoid fragmentation.
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
int ret;
@@ -334,7 +307,6 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
@@ -343,15 +315,7 @@ static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
ttm_bo_unpin(&gbo->bo);
}
-/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
- * @gbo: the GEM VRAM object
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+static int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
int ret;
@@ -364,7 +328,6 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vram_unpin);
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
@@ -690,41 +653,6 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
- * @gem: The GEM object to pin
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- /*
- * Fbdev console emulation is the use case of these PRIME
- * helpers. This may involve updating a hardware buffer from
- * a shadow FB. We pin the buffer to it's current location
- * (either video RAM or system memory) to prevent it from
- * being relocated during the update operation. If you require
- * the buffer to be pinned to VRAM, implement a callback that
- * sets the flags accordingly.
- */
- return drm_gem_vram_pin_locked(gbo, 0);
-}
-
-/**
- * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
- * @gem: The GEM object to unpin
- */
-static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- drm_gem_vram_unpin_locked(gbo);
-}
-
-/**
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
@@ -762,8 +690,6 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.free = drm_gem_vram_object_free,
- .pin = drm_gem_vram_object_pin,
- .unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
.mmap = drm_gem_ttm_mmap,
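The deleted wrappers were thin: they only took the object's reservation lock around the pin/unpin hooks. Any remaining caller that needs the same behaviour can open-code the pattern, reconstructed here from the code removed above:

	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = obj->funcs->pin ? obj->funcs->pin(obj) : 0;
	dma_resv_unlock(obj->resv);

With the VRAM helper no longer implementing the pin/unpin hooks, fbdev-style consumers rely on drm_gem_vram_vmap(), which pins the buffer internally while it is mapped.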
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 735bfdf4322f..5bb4c77db2c3 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -7,11 +7,11 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/hmm.h>
+#include <linux/hugetlb_inline.h>
#include <linux/memremap.h>
-#include <linux/migrate.h>
#include <linux/mm_types.h>
-#include <linux/pagemap.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
@@ -108,21 +108,6 @@
*/
/**
- * DOC: Migration
- *
- * The migration support is quite simple, allowing migration between RAM and
- * device memory at the range granularity. For example, GPU SVM currently does
- * not support mixing RAM and device memory pages within a range. This means
- * that upon GPU fault, the entire range can be migrated to device memory, and
- * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
- * memory storage within a range could be added in the future if required.
- *
- * The reasoning for only supporting range granularity is as follows: it
- * simplifies the implementation, and range sizes are driver-defined and should
- * be relatively small.
- */
-
-/**
* DOC: Partial Unmapping of Ranges
*
* Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
@@ -191,12 +176,9 @@
* }
*
* if (driver_migration_policy(range)) {
- * mmap_read_lock(mm);
- * devmem = driver_alloc_devmem();
- * err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
- * devmem_allocation,
- * &ctx);
- * mmap_read_unlock(mm);
+ * err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
+ * gpuva_start, gpuva_end, gpusvm->mm,
+ * ctx->timeslice_ms);
* if (err) // CPU mappings may have changed
* goto retry;
* }
@@ -289,97 +271,6 @@ npages_in_range(unsigned long start, unsigned long end)
}
/**
- * struct drm_gpusvm_zdd - GPU SVM zone device data
- *
- * @refcount: Reference count for the zdd
- * @devmem_allocation: device memory allocation
- * @device_private_page_owner: Device private pages owner
- *
- * This structure serves as a generic wrapper installed in
- * page->zone_device_data. It provides infrastructure for looking up a device
- * memory allocation upon CPU page fault and asynchronously releasing device
- * memory once the CPU has no page references. Asynchronous release is useful
- * because CPU page references can be dropped in IRQ contexts, while releasing
- * device memory likely requires sleeping locks.
- */
-struct drm_gpusvm_zdd {
- struct kref refcount;
- struct drm_gpusvm_devmem *devmem_allocation;
- void *device_private_page_owner;
-};
-
-/**
- * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
- * @device_private_page_owner: Device private pages owner
- *
- * This function allocates and initializes a new zdd structure. It sets up the
- * reference count and initializes the destroy work.
- *
- * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
- */
-static struct drm_gpusvm_zdd *
-drm_gpusvm_zdd_alloc(void *device_private_page_owner)
-{
- struct drm_gpusvm_zdd *zdd;
-
- zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
- if (!zdd)
- return NULL;
-
- kref_init(&zdd->refcount);
- zdd->devmem_allocation = NULL;
- zdd->device_private_page_owner = device_private_page_owner;
-
- return zdd;
-}
-
-/**
- * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
- * @zdd: Pointer to the zdd structure.
- *
- * This function increments the reference count of the provided zdd structure.
- *
- * Return: Pointer to the zdd structure.
- */
-static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
-{
- kref_get(&zdd->refcount);
- return zdd;
-}
-
-/**
- * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
- * @ref: Pointer to the reference count structure.
- *
- * This function queues the destroy_work of the zdd for asynchronous destruction.
- */
-static void drm_gpusvm_zdd_destroy(struct kref *ref)
-{
- struct drm_gpusvm_zdd *zdd =
- container_of(ref, struct drm_gpusvm_zdd, refcount);
- struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;
-
- if (devmem) {
- complete_all(&devmem->detached);
- if (devmem->ops->devmem_release)
- devmem->ops->devmem_release(devmem);
- }
- kfree(zdd);
-}
-
-/**
- * drm_gpusvm_zdd_put() - Put a zdd reference.
- * @zdd: Pointer to the zdd structure.
- *
- * This function decrements the reference count of the provided zdd structure
- * and schedules its destruction if the count drops to zero.
- */
-static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
-{
- kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
-}
-
-/**
* drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
* @notifier: Pointer to the GPU SVM notifier structure.
* @start: Start address of the range
@@ -945,7 +836,7 @@ retry:
* process-many-malloc' fails. In the failure case, each process
* mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
* ranges. When migrating the SVM ranges, some processes fail in
- * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
+ * drm_pagemap_migrate_to_devmem with 'migrate.cpages != npages'
* and then upon drm_gpusvm_range_get_pages device pages from
* other processes are collected + faulted in which creates all
* sorts of problems. Unsure exactly how this happening, also
@@ -981,6 +872,40 @@ static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
#endif
/**
+ * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @start: The inclusive start user address.
+ * @end: The exclusive end user address.
+ *
+ * Returns: The start address of the first VMA within the provided range,
+ * or ULONG_MAX if no VMA intersects it. Assumes @start < @end.
+ */
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = ULONG_MAX;
+
+ if (!mmget_not_zero(mm))
+ return addr;
+
+ mmap_read_lock(mm);
+
+ vma = find_vma_intersection(mm, start, end);
+ if (vma)
+ addr = vma->vm_start;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return addr;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_find_vma_start);
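A typical use for this helper is a prefetch or fault loop that must skip holes in the CPU address space instead of failing on them. A hedged sketch (the chunk stride and per-range helper are hypothetical):

	unsigned long addr = start;

	while (addr < end) {
		addr = drm_gpusvm_find_vma_start(gpusvm, addr, end);
		if (addr == ULONG_MAX)
			break; /* no VMA intersects [addr, end) */

		my_prefault_one_range(gpusvm, addr); /* hypothetical */
		addr += MY_CHUNK_SIZE;               /* hypothetical stride */
	}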
+
+/**
* drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @fault_addr: Fault address
@@ -1329,7 +1254,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
.dev_private_owner = gpusvm->device_private_page_owner,
};
struct mm_struct *mm = gpusvm->mm;
- struct drm_gpusvm_zdd *zdd;
+ void *zdd;
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
unsigned long i, j;
@@ -1412,6 +1337,7 @@ map_pages:
}
zdd = NULL;
+ pagemap = NULL;
num_dma_mapped = 0;
for (i = 0, j = 0; i < npages; ++j) {
struct page *page = hmm_pfn_to_page(pfns[i]);
@@ -1431,7 +1357,7 @@ map_pages:
}
pagemap = page_pgmap(page);
- dpagemap = zdd->devmem_allocation->dpagemap;
+ dpagemap = drm_pagemap_page_to_dpagemap(page);
if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
/*
* Raced. This is not supposed to happen
@@ -1455,7 +1381,7 @@ map_pages:
} else {
dma_addr_t addr;
- if (is_zone_device_page(page) || zdd) {
+ if (is_zone_device_page(page) || pagemap) {
err = -EOPNOTSUPP;
goto err_unmap;
}
@@ -1483,7 +1409,7 @@ map_pages:
flags.has_dma_mapping = true;
}
- if (zdd) {
+ if (pagemap) {
flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
}
@@ -1511,6 +1437,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
* @ctx: GPU SVM context
@@ -1541,562 +1468,11 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
/**
- * drm_gpusvm_migration_unlock_put_page() - Put a migration page
- * @page: Pointer to the page to put
- *
- * This function unlocks and puts a page.
- */
-static void drm_gpusvm_migration_unlock_put_page(struct page *page)
-{
- unlock_page(page);
- put_page(page);
-}
-
-/**
- * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
- * @npages: Number of pages
- * @migrate_pfn: Array of migrate page frame numbers
- *
- * This function unlocks and puts an array of pages.
- */
-static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
- unsigned long *migrate_pfn)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- struct page *page;
-
- if (!migrate_pfn[i])
- continue;
-
- page = migrate_pfn_to_page(migrate_pfn[i]);
- drm_gpusvm_migration_unlock_put_page(page);
- migrate_pfn[i] = 0;
- }
-}
-
-/**
- * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
- * @page: Pointer to the page
- * @zdd: Pointer to the GPU SVM zone device data
- *
- * This function associates the given page with the specified GPU SVM zone
- * device data and initializes it for zone device usage.
- */
-static void drm_gpusvm_get_devmem_page(struct page *page,
- struct drm_gpusvm_zdd *zdd)
-{
- page->zone_device_data = drm_gpusvm_zdd_get(zdd);
- zone_device_page_init(page);
-}
-
-/**
- * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
- * @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
- * @migrate_pfn: Array of migrate page frame numbers to map
- * @npages: Number of pages to map
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function maps pages of memory for migration usage in GPU SVM. It
- * iterates over each page frame number provided in @migrate_pfn, maps the
- * corresponding page, and stores the DMA address in the provided @dma_addr
- * array.
- *
- * Return: 0 on success, -EFAULT if an error occurs during mapping.
- */
-static int drm_gpusvm_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
- unsigned long *migrate_pfn,
- unsigned long npages,
- enum dma_data_direction dir)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
-
- if (!page)
- continue;
-
- if (WARN_ON_ONCE(is_zone_device_page(page)))
- return -EFAULT;
-
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
- * @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
- * @npages: Number of pages to unmap
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function unmaps previously mapped pages of memory for GPU Shared Virtual
- * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
- * if it's valid and not already unmapped, and unmaps the corresponding page.
- */
-static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
- unsigned long npages,
- enum dma_data_direction dir)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
-
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
- }
-}
-
-/**
- * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
- * @devmem_allocation: Pointer to the device memory allocation. The caller
- * should hold a reference to the device memory allocation,
- * which should be dropped via ops->devmem_release or upon
- * the failure of this function.
- * @ctx: GPU SVM context
- *
- * This function migrates the specified GPU SVM range to device memory. It
- * performs the necessary setup and invokes the driver-specific operations for
- * migration to device memory. Upon successful return, @devmem_allocation can
- * safely reference @range until ops->devmem_release is called which only upon
- * successful return. Expected to be called while holding the mmap lock in read
- * mode.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- struct drm_gpusvm_devmem *devmem_allocation,
- const struct drm_gpusvm_ctx *ctx)
-{
- const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
- unsigned long start = drm_gpusvm_range_start(range),
- end = drm_gpusvm_range_end(range);
- struct migrate_vma migrate = {
- .start = start,
- .end = end,
- .pgmap_owner = gpusvm->device_private_page_owner,
- .flags = MIGRATE_VMA_SELECT_SYSTEM,
- };
- struct mm_struct *mm = gpusvm->mm;
- unsigned long i, npages = npages_in_range(start, end);
- struct vm_area_struct *vas;
- struct drm_gpusvm_zdd *zdd = NULL;
- struct page **pages;
- dma_addr_t *dma_addr;
- void *buf;
- int err;
-
- mmap_assert_locked(gpusvm->mm);
-
- if (!range->flags.migrate_devmem)
- return -EINVAL;
-
- if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
- !ops->copy_to_ram)
- return -EOPNOTSUPP;
-
- vas = vma_lookup(mm, start);
- if (!vas) {
- err = -ENOENT;
- goto err_out;
- }
-
- if (end > vas->vm_end || start < vas->vm_start) {
- err = -EINVAL;
- goto err_out;
- }
-
- if (!vma_is_anonymous(vas)) {
- err = -EBUSY;
- goto err_out;
- }
-
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
-
- zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
- if (!zdd) {
- err = -ENOMEM;
- goto err_free;
- }
-
- migrate.vma = vas;
- migrate.src = buf;
- migrate.dst = migrate.src + npages;
-
- err = migrate_vma_setup(&migrate);
- if (err)
- goto err_free;
-
- if (!migrate.cpages) {
- err = -EFAULT;
- goto err_free;
- }
-
- if (migrate.cpages != npages) {
- err = -EBUSY;
- goto err_finalize;
- }
-
- err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
- if (err)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
- migrate.src, npages, DMA_TO_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i) {
- struct page *page = pfn_to_page(migrate.dst[i]);
-
- pages[i] = page;
- migrate.dst[i] = migrate_pfn(migrate.dst[i]);
- drm_gpusvm_get_devmem_page(page, zdd);
- }
-
- err = ops->copy_to_devmem(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
- /* Upon success bind devmem allocation to range and zdd */
- devmem_allocation->timeslice_expiration = get_jiffies_64() +
- msecs_to_jiffies(ctx->timeslice_ms);
- zdd->devmem_allocation = devmem_allocation; /* Owns ref */
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
- migrate_vma_pages(&migrate);
- migrate_vma_finalize(&migrate);
- drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
- DMA_TO_DEVICE);
-err_free:
- if (zdd)
- drm_gpusvm_zdd_put(zdd);
- kvfree(buf);
-err_out:
- return err;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);
-
-/**
- * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
- * @vas: Pointer to the VM area structure, can be NULL
- * @fault_page: Fault page
- * @npages: Number of pages to populate
- * @mpages: Number of pages to migrate
- * @src_mpfn: Source array of migrate PFNs
- * @mpfn: Array of migrate PFNs to populate
- * @addr: Start address for PFN allocation
- *
- * This function populates the RAM migrate page frame numbers (PFNs) for the
- * specified VM area structure. It allocates and locks pages in the VM area for
- * RAM usage. If vas is non-NULL use alloc_page_vma for allocation, if NULL use
- * alloc_page for allocation.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
- struct page *fault_page,
- unsigned long npages,
- unsigned long *mpages,
- unsigned long *src_mpfn,
- unsigned long *mpfn,
- unsigned long addr)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
-
- if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- src_page = migrate_pfn_to_page(src_mpfn[i]);
- if (!src_page)
- continue;
-
- if (fault_page) {
- if (src_page->zone_device_data !=
- fault_page->zone_device_data)
- continue;
- }
-
- if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
- else
- page = alloc_page(GFP_HIGHUSER);
-
- if (!page)
- goto free_pages;
-
- mpfn[i] = migrate_pfn(page_to_pfn(page));
- }
-
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(mpfn[i]);
-
- if (!page)
- continue;
-
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
- }
-
- return 0;
-
-free_pages:
- for (i = 0; i < npages; ++i) {
- struct page *page = migrate_pfn_to_page(mpfn[i]);
-
- if (!page)
- continue;
-
- put_page(page);
- mpfn[i] = 0;
- }
- return -ENOMEM;
-}
-
-/**
- * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
- * @devmem_allocation: Pointer to the device memory allocation
- *
- * Similar to __drm_gpusvm_migrate_to_ram but does not require mmap lock and
- * migration done via migrate_device_* functions.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
-{
- const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
- unsigned long npages, mpages = 0;
- struct page **pages;
- unsigned long *src, *dst;
- dma_addr_t *dma_addr;
- void *buf;
- int i, err = 0;
- unsigned int retry_count = 2;
-
- npages = devmem_allocation->size >> PAGE_SHIFT;
-
-retry:
- if (!mmget_not_zero(devmem_allocation->mm))
- return -EFAULT;
-
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- src = buf;
- dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
-
- err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
- if (err)
- goto err_free;
-
- err = migrate_device_pfns(src, npages);
- if (err)
- goto err_free;
-
- err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
- src, dst, 0);
- if (err || !mpages)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
- dst, npages, DMA_FROM_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i)
- pages[i] = migrate_pfn_to_page(src[i]);
-
- err = ops->copy_to_ram(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, dst);
- migrate_device_pages(src, dst, npages);
- migrate_device_finalize(src, dst, npages);
- drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
- DMA_FROM_DEVICE);
-err_free:
- kvfree(buf);
-err_out:
- mmput_async(devmem_allocation->mm);
-
- if (completion_done(&devmem_allocation->detached))
- return 0;
-
- if (retry_count--) {
- cond_resched();
- goto retry;
- }
-
- return err ?: -EBUSY;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);
-
-/**
- * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
- * @vas: Pointer to the VM area structure
- * @device_private_page_owner: Device private pages owner
- * @page: Pointer to the page for fault handling (can be NULL)
- * @fault_addr: Fault address
- * @size: Size of migration
- *
- * This internal function performs the migration of the specified GPU SVM range
- * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and
- * invokes the driver-specific operations for migration to RAM.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
- void *device_private_page_owner,
- struct page *page,
- unsigned long fault_addr,
- unsigned long size)
-{
- struct migrate_vma migrate = {
- .vma = vas,
- .pgmap_owner = device_private_page_owner,
- .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
- MIGRATE_VMA_SELECT_DEVICE_COHERENT,
- .fault_page = page,
- };
- struct drm_gpusvm_zdd *zdd;
- const struct drm_gpusvm_devmem_ops *ops;
- struct device *dev = NULL;
- unsigned long npages, mpages = 0;
- struct page **pages;
- dma_addr_t *dma_addr;
- unsigned long start, end;
- void *buf;
- int i, err = 0;
-
- if (page) {
- zdd = page->zone_device_data;
- if (time_before64(get_jiffies_64(),
- zdd->devmem_allocation->timeslice_expiration))
- return 0;
- }
-
- start = ALIGN_DOWN(fault_addr, size);
- end = ALIGN(fault_addr + 1, size);
-
- /* Corner where VMA area struct has been partially unmapped */
- if (start < vas->vm_start)
- start = vas->vm_start;
- if (end > vas->vm_end)
- end = vas->vm_end;
-
- migrate.start = start;
- migrate.end = end;
- npages = npages_in_range(start, end);
-
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
- sizeof(*pages), GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
-
- migrate.vma = vas;
- migrate.src = buf;
- migrate.dst = migrate.src + npages;
-
- err = migrate_vma_setup(&migrate);
- if (err)
- goto err_free;
-
- /* Raced with another CPU fault, nothing to do */
- if (!migrate.cpages)
- goto err_free;
-
- if (!page) {
- for (i = 0; i < npages; ++i) {
- if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- page = migrate_pfn_to_page(migrate.src[i]);
- break;
- }
-
- if (!page)
- goto err_finalize;
- }
- zdd = page->zone_device_data;
- ops = zdd->devmem_allocation->ops;
- dev = zdd->devmem_allocation->dev;
-
- err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
- migrate.src, migrate.dst,
- start);
- if (err)
- goto err_finalize;
-
- err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
- DMA_FROM_DEVICE);
- if (err)
- goto err_finalize;
-
- for (i = 0; i < npages; ++i)
- pages[i] = migrate_pfn_to_page(migrate.src[i]);
-
- err = ops->copy_to_ram(pages, dma_addr, npages);
- if (err)
- goto err_finalize;
-
-err_finalize:
- if (err)
- drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
- migrate_vma_pages(&migrate);
- migrate_vma_finalize(&migrate);
- if (dev)
- drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
- DMA_FROM_DEVICE);
-err_free:
- kvfree(buf);
-err_out:
-
- return err;
-}
-
-/**
- * drm_gpusvm_range_evict - Evict GPU SVM range
 * @range: Pointer to the GPU SVM range to be evicted
*
- * This function evicts the specified GPU SVM range. This function will not
- * evict coherent pages.
+ * This function evicts the specified GPU SVM range.
*
* Return: 0 on success, a negative error code on failure.
*/
@@ -2149,60 +1525,6 @@ int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
/**
- * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
- *
- * This function is a callback used to put the GPU SVM zone device data
- * associated with a page when it is being released.
- */
-static void drm_gpusvm_page_free(struct page *page)
-{
- drm_gpusvm_zdd_put(page->zone_device_data);
-}
-
-/**
- * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
- * @vmf: Pointer to the fault information structure
- *
- * This function is a page fault handler used to migrate a GPU SVM range to RAM.
- * It retrieves the GPU SVM range information from the faulting page and invokes
- * the internal migration function to migrate the range back to RAM.
- *
- * Return: VM_FAULT_SIGBUS on failure, 0 on success.
- */
-static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
-{
- struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
- int err;
-
- err = __drm_gpusvm_migrate_to_ram(vmf->vma,
- zdd->device_private_page_owner,
- vmf->page, vmf->address,
- zdd->devmem_allocation->size);
-
- return err ? VM_FAULT_SIGBUS : 0;
-}
-
-/*
- * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
- */
-static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
- .page_free = drm_gpusvm_page_free,
- .migrate_to_ram = drm_gpusvm_migrate_to_ram,
-};
-
-/**
- * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
- *
- * Return: Pointer to the GPU SVM device page map operations structure.
- */
-const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
-{
- return &drm_gpusvm_pagemap_ops;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
-
-/**
* drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
* @gpusvm: Pointer to the GPU SVM structure.
* @start: Start address
@@ -2246,28 +1568,5 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
-/**
- * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
- *
- * @dev: Pointer to the device structure which device memory allocation belongs to
- * @mm: Pointer to the mm_struct for the address space
- * @ops: Pointer to the operations structure for GPU SVM device memory
- * @dpagemap: The struct drm_pagemap we're allocating from.
- * @size: Size of device memory allocation
- */
-void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
- struct device *dev, struct mm_struct *mm,
- const struct drm_gpusvm_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
-{
- init_completion(&devmem_allocation->detached);
- devmem_allocation->dev = dev;
- devmem_allocation->mm = mm;
- devmem_allocation->ops = ops;
- devmem_allocation->dpagemap = dpagemap;
- devmem_allocation->size = size;
-}
-EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);
-
MODULE_DESCRIPTION("DRM GPUSVM");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..bbc7fecb6f4a 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -27,6 +27,7 @@
#include <drm/drm_gpuvm.h>
+#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
@@ -2299,13 +2300,13 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @priv: pointer to a driver private data structure
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
- * @priv: pointer to a driver private data structure
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2349,7 +2350,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
/**
- * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
@@ -2390,6 +2391,132 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
+static int
+drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
+{
+ struct drm_exec *exec = priv;
+
+ switch (op->op) {
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.unmap->va->gem.obj)
+ return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
+ return 0;
+ case DRM_GPUVA_OP_UNMAP:
+ if (op->unmap.va->gem.obj)
+ return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static const struct drm_gpuvm_ops lock_ops = {
+ .sm_step_map = drm_gpuva_sm_step_lock,
+ .sm_step_remap = drm_gpuva_sm_step_lock,
+ .sm_step_unmap = drm_gpuva_sm_step_lock,
+};
+
+/**
+ * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @num_fences: for newly mapped objects, the # of fences to reserve
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
+ * will be newly mapped.
+ *
+ * The expected usage is:
+ *
+ * vm_bind {
+ * struct drm_exec exec;
+ *
+ * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
+ * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
+ *
+ * drm_exec_until_all_locked (&exec) {
+ * for_each_vm_bind_operation {
+ * switch (op->op) {
+ * case DRIVER_OP_UNMAP:
+ * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ * break;
+ * case DRIVER_OP_MAP:
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
+ * op->addr, op->range,
+ * obj, op->obj_offset);
+ * break;
+ * }
+ *
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * return ret;
+ * }
+ * }
+ * }
+ *
+ * This enables all locking to be performed before the driver begins modifying
+ * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
+ * where an earlier op can alter the sequence of steps generated for a later
+ * op, because the later altered step will involve the same GEM object(s)
+ * already seen in the earlier locking step. For example:
+ *
+ * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
+ * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
+ * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
+ *
+ * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
+ * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
+ * required without the earlier DRIVER_OP_MAP. This is safe because we've
+ * already locked the GEM object in the earlier DRIVER_OP_MAP step.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ if (req_obj) {
+ int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+
+ if (ret)
+ return ret;
+ }
+
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
+ req_addr, req_range,
+ req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
+
+/**
+ * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped by drm_gpuvm_sm_unmap().
+ *
+ * See drm_gpuvm_sm_map_exec_lock() for expected usage.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range)
+{
+ return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
+ req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
+
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 60c282881958..e79c3c623c9a 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -177,10 +177,6 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
-int drm_gem_pin_locked(struct drm_gem_object *obj);
-void drm_gem_unpin_locked(struct drm_gem_object *obj);
-int drm_gem_pin(struct drm_gem_object *obj);
-void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
@@ -188,8 +184,7 @@ void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_dev_fini(struct drm_device *dev);
void drm_debugfs_dev_register(struct drm_device *dev);
-int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root);
+int drm_debugfs_register(struct drm_minor *minor, int minor_id);
void drm_debugfs_unregister(struct drm_minor *minor);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
@@ -207,8 +202,7 @@ static inline void drm_debugfs_dev_register(struct drm_device *dev)
{
}
-static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id,
- struct dentry *root)
+static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id)
{
return 0;
}
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index cc4c463daae7..247f468731de 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a4cd476f9b30..e33c78fc8fbd 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -229,7 +230,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
case DRM_FORMAT_XRGB8888:
switch (dbidev->pixel_format) {
case DRM_FORMAT_RGB565:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap);
+ if (swap) {
+ drm_fb_xrgb8888_to_rgb565be(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip,
+ fmtcnv_state);
+ }
break;
case DRM_FORMAT_RGB888:
drm_fb_xrgb8888_to_rgb888(&dst_map, NULL, src, fb, clip, fmtcnv_state);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 21fd647f8ce1..3a9b3278a6e3 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -26,6 +26,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index b4239fd04e9d..25f376869b3a 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 5565464c1734..988735560570 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -20,6 +20,8 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
@@ -72,6 +74,7 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
* @dev: DRM device
* @fb: drm_framebuffer object to fill out
+ * @info: pixel format information
* @mode_cmd: metadata from the userspace fb creation request
*
 * This helper can be used in a driver's fb_create callback to pre-fill the fb's
@@ -79,12 +82,13 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
*/
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
fb->dev = dev;
- fb->format = drm_get_format_info(dev, mode_cmd);
+ fb->format = info;
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 7694b85e75e3..beb91a13a312 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -21,6 +21,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
new file mode 100644
index 000000000000..1da55322af12
--- /dev/null
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_pagemap.h>
+
+/**
+ * DOC: Overview
+ *
+ * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
+ * providing a way to populate a struct mm_struct virtual range with device
+ * private pages and to provide helpers to abstract device memory allocations,
+ * to migrate memory back and forth between device memory and system RAM and
+ * to handle access (and in the future migration) between devices implementing
+ * a fast interconnect that is not necessarily visible to the rest of the
+ * system.
+ *
+ * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
+ * instances to populate struct mm_struct virtual ranges with memory, and the
+ * migration is best effort only and may thus fail. The implementation should
+ * also handle device unbinding by failing new population requests with
+ * -ENODEV and, after that, migrating all device pages back to system RAM.
+ */
+
+/**
+ * DOC: Migration
+ *
+ * Migration granularity typically follows the GPU SVM range requests, but
+ * if there are clashes, due to races or due to the fact that multiple GPU
+ * SVM instances have different views of the ranges used, and because of that
+ * parts of a requested range is already present in the requested device memory,
+ * the implementation has a variety of options. It can fail and it can choose
+ * to populate only the part of the range that isn't already in device memory,
+ * and it can evict the range to system before trying to migrate. Ideally an
+ * implementation would just try to migrate the missing part of the range and
+ * allocate just enough memory to do so.
+ *
+ * When migrating to system memory in response to a CPU fault or a device
+ * memory eviction request, currently a full device memory allocation is
+ * migrated back to system memory. Moving forward this might need improvement for
+ * situations where a single page needs bouncing between system memory and
+ * device memory due to, for example, atomic operations.
+ *
+ * Key DRM pagemap components:
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for the drm_pagemap to
+ * migrate to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations to
+ * release memory, populate pfns, and copy to / from device memory.
+ */
+
+/**
+ * struct drm_pagemap_zdd - GPU SVM zone device data
+ *
+ * @refcount: Reference count for the zdd
+ * @devmem_allocation: device memory allocation
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This structure serves as a generic wrapper installed in
+ * page->zone_device_data. It provides infrastructure for looking up a device
+ * memory allocation upon CPU page fault and asynchronously releasing device
+ * memory once the CPU has no page references. Asynchronous release is useful
+ * because CPU page references can be dropped in IRQ contexts, while releasing
+ * device memory likely requires sleeping locks.
+ */
+struct drm_pagemap_zdd {
+ struct kref refcount;
+ struct drm_pagemap_devmem *devmem_allocation;
+ void *device_private_page_owner;
+};
+
+/**
+ * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This function allocates and initializes a new zdd structure. It sets up
+ * the reference count.
+ *
+ * Return: Pointer to the allocated zdd on success, NULL on failure.
+ */
+static struct drm_pagemap_zdd *
+drm_pagemap_zdd_alloc(void *device_private_page_owner)
+{
+ struct drm_pagemap_zdd *zdd;
+
+ zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
+ if (!zdd)
+ return NULL;
+
+ kref_init(&zdd->refcount);
+ zdd->devmem_allocation = NULL;
+ zdd->device_private_page_owner = device_private_page_owner;
+
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function increments the reference count of the provided zdd structure.
+ *
+ * Return: Pointer to the zdd structure.
+ */
+static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
+{
+ kref_get(&zdd->refcount);
+ return zdd;
+}
+
+/**
+ * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
+ * @ref: Pointer to the reference count structure.
+ *
+ * This function tears down the zdd: it signals detach completion on the
+ * bound device memory allocation, if any, invokes the driver's
+ * devmem_release() operation, and frees the zdd.
+ */
+static void drm_pagemap_zdd_destroy(struct kref *ref)
+{
+ struct drm_pagemap_zdd *zdd =
+ container_of(ref, struct drm_pagemap_zdd, refcount);
+ struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
+
+ if (devmem) {
+ complete_all(&devmem->detached);
+ if (devmem->ops->devmem_release)
+ devmem->ops->devmem_release(devmem);
+ }
+ kfree(zdd);
+}
+
+/**
+ * drm_pagemap_zdd_put() - Put a zdd reference.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function decrements the reference count of the provided zdd structure
+ * and schedules its destruction if the count drops to zero.
+ */
+static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
+{
+ kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_page() - Put a migration page
+ * @page: Pointer to the page to put
+ *
+ * This function unlocks and puts a page.
+ */
+static void drm_pagemap_migration_unlock_put_page(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+/**
+ * drm_pagemap_migration_unlock_put_pages() - Put migration pages
+ * @npages: Number of pages
+ * @migrate_pfn: Array of migrate page frame numbers
+ *
+ * This function unlocks and puts an array of pages.
+ */
+static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
+ unsigned long *migrate_pfn)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!migrate_pfn[i])
+ continue;
+
+ page = migrate_pfn_to_page(migrate_pfn[i]);
+ drm_pagemap_migration_unlock_put_page(page);
+ migrate_pfn[i] = 0;
+ }
+}
+
+/**
+ * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
+ * @page: Pointer to the page
+ * @zdd: Pointer to the GPU SVM zone device data
+ *
+ * This function associates the given page with the specified GPU SVM zone
+ * device data and initializes it for zone device usage.
+ */
+static void drm_pagemap_get_devmem_page(struct page *page,
+ struct drm_pagemap_zdd *zdd)
+{
+ page->zone_device_data = drm_pagemap_zdd_get(zdd);
+ zone_device_page_init(page);
+}
+
+/**
+ * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
+ * @dev: The device for which the pages are being mapped
+ * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @migrate_pfn: Array of migrate page frame numbers to map
+ * @npages: Number of pages to map
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function maps pages of memory for migration usage in GPU SVM. It
+ * iterates over each page frame number provided in @migrate_pfn, maps the
+ * corresponding page, and stores the DMA address in the provided @dma_addr
+ * array.
+ *
+ * Returns: 0 on success, -EFAULT if an error occurs during mapping.
+ */
+static int drm_pagemap_migrate_map_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long *migrate_pfn,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+
+ if (!page)
+ continue;
+
+ if (WARN_ON_ONCE(is_zone_device_page(page)))
+ return -EFAULT;
+
+ dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, dma_addr[i]))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
+ * @dev: The device for which the pages were mapped
+ * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @npages: Number of pages to unmap
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function unmaps previously mapped pages of memory for GPU Shared Virtual
+ * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
+ * if it's valid and not already unmapped, and unmaps the corresponding page.
+ */
+static void drm_pagemap_migrate_unmap_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ continue;
+
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ }
+}
+
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
+ * @devmem_allocation: The device memory allocation to migrate to.
+ * The caller should hold a reference to the device memory allocation,
+ * and the reference is consumed by this function unless it returns with
+ * an error.
+ * @mm: Pointer to the struct mm_struct.
+ * @start: Start of the virtual address range to migrate.
+ * @end: End of the virtual address range to migrate.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @pgmap_owner: The device-private page owner, recorded in the zone device
+ * data. Not used for source page selection, since only system memory is
+ * considered as the migration source.
+ *
+ * This function migrates the specified virtual address range to device memory.
+ * It performs the necessary setup and invokes the driver-specific operations for
+ * migration to device memory. Expected to be called while holding the mmap lock in
+ * at least read mode.
+ *
+ * Note: The @timeslice_ms parameter can typically be used to force data to
+ * remain in pagemap pages long enough for a GPU to perform a task and to prevent
+ * a migration livelock. One alternative would be for the GPU driver to block
+ * in a mmu_notifier for the specified amount of time, but adding the
+ * functionality to the pagemap is likely nicer to the system as a whole.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ struct migrate_vma migrate = {
+ .start = start,
+ .end = end,
+ .pgmap_owner = pgmap_owner,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ unsigned long i, npages = npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct drm_pagemap_zdd *zdd = NULL;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int err;
+
+ mmap_assert_locked(mm);
+
+ if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ !ops->copy_to_ram)
+ return -EOPNOTSUPP;
+
+ vas = vma_lookup(mm, start);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (end > vas->vm_end || start < vas->vm_start) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!vma_is_anonymous(vas)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ zdd = drm_pagemap_zdd_alloc(pgmap_owner);
+ if (!zdd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ if (!migrate.cpages) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (migrate.cpages != npages) {
+ err = -EBUSY;
+ goto err_finalize;
+ }
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ migrate.src, npages, DMA_TO_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = pfn_to_page(migrate.dst[i]);
+
+ pages[i] = page;
+ migrate.dst[i] = migrate_pfn(migrate.dst[i]);
+ drm_pagemap_get_devmem_page(page, zdd);
+ }
+
+ err = ops->copy_to_devmem(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+ /* Upon success, bind the devmem allocation to the zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(timeslice_ms);
+ zdd->devmem_allocation = devmem_allocation; /* Owns ref */
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_TO_DEVICE);
+err_free:
+ if (zdd)
+ drm_pagemap_zdd_put(zdd);
+ kvfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
+
+/**
+ * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
+ * @vas: Pointer to the VM area structure, can be NULL
+ * @fault_page: Fault page
+ * @npages: Number of pages to populate
+ * @mpages: Number of pages to migrate
+ * @src_mpfn: Source array of migrate PFNs
+ * @mpfn: Array of migrate PFNs to populate
+ * @addr: Start address for PFN allocation
+ *
+ * This function populates the RAM migrate page frame numbers (PFNs) for the
+ * specified VM area structure. It allocates and locks pages in the VM area for
+ * RAM usage. If @vas is non-NULL, pages are allocated with alloc_page_vma();
+ * otherwise alloc_page() is used.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
+ struct page *fault_page,
+ unsigned long npages,
+ unsigned long *mpages,
+ unsigned long *src_mpfn,
+ unsigned long *mpfn,
+ unsigned long addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+ struct page *page, *src_page;
+
+ if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ src_page = migrate_pfn_to_page(src_mpfn[i]);
+ if (!src_page)
+ continue;
+
+ if (fault_page) {
+ if (src_page->zone_device_data !=
+ fault_page->zone_device_data)
+ continue;
+ }
+
+ if (vas)
+ page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ else
+ page = alloc_page(GFP_HIGHUSER);
+
+ if (!page)
+ goto free_pages;
+
+ mpfn[i] = migrate_pfn(page_to_pfn(page));
+ }
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ WARN_ON_ONCE(!trylock_page(page));
+ ++*mpages;
+ }
+
+ return 0;
+
+free_pages:
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ put_page(page);
+ mpfn[i] = 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * drm_pagemap_evict_to_ram() - Evict a device memory allocation to RAM
+ * @devmem_allocation: Pointer to the device memory allocation
+ *
+ * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
+ * lock; migration is done via the migrate_device_* functions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
+{
+ const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ unsigned long *src, *dst;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int i, err = 0;
+ unsigned int retry_count = 2;
+
+ npages = devmem_allocation->size >> PAGE_SHIFT;
+
+retry:
+ if (!mmget_not_zero(devmem_allocation->mm))
+ return -EFAULT;
+
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ src = buf;
+ dst = buf + (sizeof(*src) * npages);
+ dma_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ if (err)
+ goto err_free;
+
+ err = migrate_device_pfns(src, npages);
+ if (err)
+ goto err_free;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
+ src, dst, 0);
+ if (err || !mpages)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ dst, npages, DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, dst);
+ migrate_device_pages(src, dst, npages);
+ migrate_device_finalize(src, dst, npages);
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+ mmput_async(devmem_allocation->mm);
+
+ if (completion_done(&devmem_allocation->detached))
+ return 0;
+
+ if (retry_count--) {
+ cond_resched();
+ goto retry;
+ }
+
+ return err ?: -EBUSY;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
+
+/**
+ * __drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (internal)
+ * @vas: Pointer to the VM area structure
+ * @device_private_page_owner: Device private pages owner
+ * @page: Pointer to the page for fault handling (can be NULL)
+ * @fault_addr: Fault address
+ * @size: Size of migration
+ *
+ * This internal function performs the migration of the specified virtual
+ * range to RAM. It sets up the migration, populates and DMA-maps the RAM
+ * PFNs, and
+ * invokes the driver-specific operations for migration to RAM.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
+ void *device_private_page_owner,
+ struct page *page,
+ unsigned long fault_addr,
+ unsigned long size)
+{
+ struct migrate_vma migrate = {
+ .vma = vas,
+ .pgmap_owner = device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ .fault_page = page,
+ };
+ struct drm_pagemap_zdd *zdd;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct device *dev = NULL;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ unsigned long start, end;
+ void *buf;
+ int i, err = 0;
+
+ if (page) {
+ zdd = page->zone_device_data;
+ if (time_before64(get_jiffies_64(),
+ zdd->devmem_allocation->timeslice_expiration))
+ return 0;
+ }
+
+ start = ALIGN_DOWN(fault_addr, size);
+ end = ALIGN(fault_addr + 1, size);
+
+ /* Corner case where the VMA has been partially unmapped */
+ if (start < vas->vm_start)
+ start = vas->vm_start;
+ if (end > vas->vm_end)
+ end = vas->vm_end;
+
+ migrate.start = start;
+ migrate.end = end;
+ npages = npages_in_range(start, end);
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ /* Raced with another CPU fault, nothing to do */
+ if (!migrate.cpages)
+ goto err_free;
+
+ if (!page) {
+ for (i = 0; i < npages; ++i) {
+ if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ page = migrate_pfn_to_page(migrate.src[i]);
+ break;
+ }
+
+ if (!page)
+ goto err_finalize;
+ }
+ zdd = page->zone_device_data;
+ ops = zdd->devmem_allocation->ops;
+ dev = zdd->devmem_allocation->dev;
+
+ err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
+ migrate.src, migrate.dst,
+ start);
+ if (err)
+ goto err_finalize;
+
+ err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ if (dev)
+ drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+
+ return err;
+}
+
+/**
+ * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
+ * @page: Pointer to the page
+ *
+ * This function is a callback used to put the GPU SVM zone device data
+ * associated with a page when it is being released.
+ */
+static void drm_pagemap_page_free(struct page *page)
+{
+ drm_pagemap_zdd_put(page->zone_device_data);
+}
+
+/**
+ * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
+ * @vmf: Pointer to the fault information structure
+ *
+ * This function is a page fault handler used to migrate a virtual range
+ * to RAM. The device memory allocation in which the device page is found is
+ * migrated in its entirety.
+ *
+ * Returns:
+ * VM_FAULT_SIGBUS on failure, 0 on success.
+ */
+static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
+{
+ struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+ int err;
+
+ err = __drm_pagemap_migrate_to_ram(vmf->vma,
+ zdd->device_private_page_owner,
+ vmf->page, vmf->address,
+ zdd->devmem_allocation->size);
+
+ return err ? VM_FAULT_SIGBUS : 0;
+}
+
+static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
+ .page_free = drm_pagemap_page_free,
+ .migrate_to_ram = drm_pagemap_migrate_to_ram,
+};
+
+/**
+ * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
+ *
+ * Returns:
+ * Pointer to the GPU SVM device page map operations structure.
+ */
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
+{
+ return &drm_pagemap_pagemap_ops;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
+
+/**
+ * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
+ *
+ * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
+ * @dev: Pointer to the device structure which device memory allocation belongs to
+ * @mm: Pointer to the mm_struct for the address space
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of device memory allocation
+ */
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size)
+{
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+ devmem_allocation->mm = mm;
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
+
+/**
+ * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
+ * @page: The struct page.
+ *
+ * Return: A pointer to the struct drm_pagemap of a device private page that
+ * was populated from the struct drm_pagemap. If the page was *not* populated
+ * from a struct drm_pagemap, the result is undefined and the function call
+ * may dereference an invalid address.
+ */
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+ struct drm_pagemap_zdd *zdd = page->zone_device_data;
+
+ return zdd->devmem_allocation->dpagemap;
+}
+EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
+
+/**
+ * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
+ * @dpagemap: Pointer to the drm_pagemap managing the device memory
+ * @start: Start of the virtual range to populate.
+ * @end: End of the virtual range to populate.
+ * @mm: Pointer to the virtual address space.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ *
+ * Attempt to populate a virtual range with device memory pages,
+ * clearing them or migrating data from the existing pages if necessary.
+ * The function is best effort only, and implementations may vary
+ * in how hard they try to satisfy the request.
+ *
+ * Return: %0 on success, negative error code on error. If the hardware
+ * device was removed / unbound the function will return %-ENODEV.
+ */
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
+{
+ int err;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+ err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
+ timeslice_ms);
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_pagemap_populate_mm);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 650de4da0853..c8bb28dccdc1 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -23,6 +23,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -473,13 +474,51 @@ int of_drm_get_panel_orientation(const struct device_node *np,
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
+/* Find panel by fwnode. This should be identical to of_drm_find_panel(). */
+static struct drm_panel *find_panel_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct drm_panel *panel;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (dev_fwnode(panel->dev) == fwnode) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/* Find panel by follower device */
+static struct drm_panel *find_panel_by_dev(struct device *follower_dev)
+{
+ struct fwnode_handle *fwnode;
+ struct drm_panel *panel;
+
+ fwnode = fwnode_find_reference(dev_fwnode(follower_dev), "panel", 0);
+ if (IS_ERR(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ panel = find_panel_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return panel;
+}
+
/**
* drm_is_panel_follower() - Check if the device is a panel follower
* @dev: The 'struct device' to check
*
* This checks to see if a device needs to be power sequenced together with
* a panel using the panel follower API.
- * At the moment panels can only be followed on device tree enabled systems.
+ *
* The "panel" property of the follower points to the panel to be followed.
*
* Return: true if we should be power sequenced with a panel; false otherwise.
@@ -491,7 +530,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_present(dev->of_node, "panel");
+ return device_property_present(dev, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
@@ -508,7 +547,6 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* If a follower is added to a panel that's already been turned on, the
* follower's prepare callback is called right away.
*
- * At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: 0 or an error code. Note that -ENODEV means that we detected that
@@ -518,16 +556,10 @@ EXPORT_SYMBOL(drm_is_panel_follower);
int drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
- struct device_node *panel_np;
struct drm_panel *panel;
int ret;
- panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
- if (!panel_np)
- return -ENODEV;
-
- panel = of_drm_find_panel(panel_np);
- of_node_put(panel_np);
+ panel = find_panel_by_dev(follower_dev);
if (IS_ERR(panel))
return PTR_ERR(panel);
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index c477d98ade2b..598f812b7cb3 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -2,6 +2,7 @@
#include <linux/array_size.h>
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 7ac0fd5391fe..3a218fb592ce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -9,6 +9,7 @@
*/
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_connector.h>
#include <drm/drm_utils.h>
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b4de79583805..1d6312fa1429 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -6,6 +6,7 @@
* Tux Ascii art taken from cowsay written by Tony Monroe
*/
+#include <linux/export.h>
#include <linux/font.h>
#include <linux/highmem.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index c585f1e8803e..cb0f68d7f8ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 04992dfd4c79..38f82391bfda 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7982be4b0306..747d248aaf02 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -23,6 +23,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a0a5d725eab0..a23fc712a8b7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -605,6 +605,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
/*
* drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
@@ -614,7 +615,16 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
!obj->funcs->get_sg_table)
return -ENOSYS;
- return drm_gem_pin(obj);
+ if (!obj->funcs->pin)
+ return 0;
+
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = obj->funcs->pin(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -631,8 +641,16 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
+
+ if (!obj->funcs->unpin)
+ return;
- drm_gem_unpin(obj);
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (drm_WARN_ON(obj->dev, ret))
+ return;
+ obj->funcs->unpin(obj);
+ dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -916,6 +934,26 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
+ * drm_gem_is_prime_exported_dma_buf() - check whether a DMA-BUF was exported
+ * from a GEM object belonging to @dev
+ * @dev: drm_device to check against
+ * @dma_buf: dma-buf object to import
+ *
+ * Return: true if the DMA-BUF was exported from a GEM object belonging
+ * to @dev, false otherwise.
+ */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
+}
+EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);
+
/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
@@ -939,16 +977,14 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct drm_gem_object *obj;
int ret;
- if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
obj = dma_buf->priv;
- if (obj->dev == dev) {
- /*
- * Importing dmabuf exported from our own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_get(obj);
- return obj;
- }
+ drm_gem_object_get(obj);
+ return obj;
}
if (!dev->driver->gem_prime_import_sg_table)
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 79517bd4418f..ded9461df5f2 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/dynamic_debug.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index 6cc39e30781f..8959f7084e0b 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index dd33fec5aabd..c0948586b7fd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -7,6 +7,7 @@
*/
#include <linux/average.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 250819fbc5ce..fcbcaaa36b5f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -3,6 +3,7 @@
* Copyright (C) 2016 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
index 38cc7a123819..879ea33dbbc4 100644
--- a/drivers/gpu/drm/drm_suballoc.c
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -42,6 +42,8 @@
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 636cd83ca29e..e1b0fa4000cd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -195,6 +195,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/eventfd.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 9cc71120246f..e4e1873f0e1e 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -2,6 +2,8 @@
#include <uapi/linux/sched/types.h>
+#include <linux/export.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 83229a031af0..58659c16874c 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -23,6 +23,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index d983ee85cf13..95b8a2e4bda6 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -10,6 +10,7 @@
*/
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3c0a5c3e0e3d..76c742328edb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -534,7 +534,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- 1, submit->ctx);
+ 1, submit->ctx, file->client_id);
if (ret)
goto err_submit_put;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 71e2e6b9d713..df4232d7e135 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,17 +35,16 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
- struct drm_gpu_scheduler *sched = sched_job->sched;
struct etnaviv_gpu *gpu = submit->gpu;
u32 dma_addr, primid = 0;
int change;
/*
- * If the GPU managed to complete this jobs fence, the timout is
- * spurious. Bail out.
+ * If the GPU managed to complete this job's fence, the timeout has
+ * fired before the free-job worker. The timeout is spurious, so bail out.
*/
if (dma_fence_is_signaled(submit->out_fence))
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* If the GPU is still making forward progress on the front-end (which
@@ -71,7 +70,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
gpu->hangcheck_dma_addr = dma_addr;
gpu->hangcheck_primid = primid;
gpu->hangcheck_fence = gpu->completed_fence;
- goto out_no_timeout;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
/* block scheduler */
@@ -87,13 +86,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
drm_sched_start(&gpu->sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-
-out_no_timeout:
- spin_lock(&sched->job_list_lock);
- list_add(&sched_job->list, &sched->pending_list);
- spin_unlock(&sched->job_list_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
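With DRM_GPU_SCHED_STAT_NO_HANG the scheduler core re-queues the job itself, so the open-coded pending-list manipulation above is gone. The resulting shape of a timeout handler, as a sketch (example_timedout_job and job_fence_is_signaled are hypothetical):

static enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *sched_job)
{
        /*
         * Fence already signaled: the timeout was spurious; tell the
         * core to keep the job on the pending list.
         */
        if (job_fence_is_signaled(sched_job))
                return DRM_GPU_SCHED_STAT_NO_HANG;

        /* ... stop the scheduler, reset the GPU, resubmit jobs ... */

        /* A real hang was handled via a reset. */
        return DRM_GPU_SCHED_STAT_RESET;
}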
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index fc1c5608db96..ddd73e7f26a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -56,6 +56,7 @@ static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count)
@@ -76,7 +77,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
fb->obj[i] = &exynos_gem[i]->base;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
@@ -94,9 +95,9 @@ err:
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_framebuffer *fb;
int i;
@@ -124,7 +125,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
+ fb = exynos_drm_framebuffer_init(dev, info, mode_cmd, exynos_gem, i);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 2f841bbdddc5..fdc6cb40cc9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,6 +14,7 @@
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct exynos_drm_gem **exynos_gem,
int count);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 9526a25e90ac..93de25b77e68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -116,7 +116,10 @@ int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(exynos_gem);
helper->fb =
- exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
+ exynos_drm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
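The same two-part pattern repeats across the drivers below: ->fb_create()-style entry points receive the struct drm_format_info * from the core, while internal callers resolve it themselves via the new three-argument drm_get_format_info(). A sketch (example_fb_from_cmd and example_framebuffer_create are hypothetical):

static struct drm_framebuffer *
example_fb_from_cmd(struct drm_device *dev,
                    const struct drm_mode_fb_cmd2 *cmd,
                    struct drm_gem_object *obj)
{
        const struct drm_format_info *info;

        /* New three-argument lookup: device, fourcc, modifier. */
        info = drm_get_format_info(dev, cmd->pixel_format, cmd->modifier[0]);
        if (!info)
                return ERR_PTR(-EINVAL);

        return example_framebuffer_create(dev, info, cmd, obj);
}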
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 8edefea2ef59..d32689cb0e23 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -203,7 +203,10 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return PTR_ERR(backing);
obj = &backing->base;
- fb = psb_framebuffer_create(dev, &mode_cmd, obj);
+ fb = psb_framebuffer_create(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err_drm_gem_object_put;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1a374702b696..e69b537ded6b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -29,24 +29,23 @@ static const struct drm_framebuffer_funcs psb_fb_funcs = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- const struct drm_format_info *info;
int ret;
/*
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
- info = drm_get_format_info(dev, mode_cmd);
- if (!info || !info->depth || info->cpp[0] > 4)
+ if (!info->depth || info->cpp[0] > 4)
return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = obj;
ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
if (ret) {
@@ -59,6 +58,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
/**
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
+ * @info: pixel format information
* @mode_cmd: the description of the requested mode
* @obj: the backing object
*
@@ -68,6 +68,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* TODO: review object references
*/
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -78,7 +79,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
@@ -96,6 +97,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
struct drm_gem_object *obj;
@@ -110,7 +112,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- fb = psb_framebuffer_create(dev, cmd, obj);
+ fb = psb_framebuffer_create(dev, info, cmd, obj);
if (IS_ERR(fb))
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 7f77cb2b2751..0b27112ec46f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -594,6 +594,7 @@ extern void psb_modeset_cleanup(struct drm_device *dev);
/* framebuffer */
struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index adadd526641d..8d548d08f127 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -188,8 +188,13 @@ retry:
} else if (format->format == DRM_FORMAT_RGB332) {
drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
- gud_is_big_endian());
+ if (gud_is_big_endian()) {
+ drm_fb_xrgb8888_to_rgb565be(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ } else {
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
+ fmtcnv_state);
+ }
} else if (format->format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
} else {
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
index d2d8582b36df..9e776112c03e 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm.h
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -11,7 +11,9 @@
struct hyperv_drm_device {
/* drm */
struct drm_device dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
/* mode */
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 6c6b57298797..945b9482bcb3 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -5,6 +5,8 @@
#include <linux/hyperv.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -15,7 +17,8 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
#include "hyperv_drm.h"
@@ -38,18 +41,6 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
return 0;
}
-static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb,
- const struct iosys_map *map)
-{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
-}
-
static int hyperv_connector_get_modes(struct drm_connector *connector)
{
struct hyperv_drm_device *hv = to_hv(connector->dev);
@@ -98,30 +89,66 @@ static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
return 0;
}
-static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct hyperv_drm_device *hv = to_hv(crtc->dev);
+ struct drm_plane *plane = &hv->plane;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc_state *crtc_state = crtc->state;
hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
- hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
}
-static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_enable = hyperv_crtc_helper_atomic_enable,
+};
+
+static const struct drm_crtc_funcs hyperv_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int hyperv_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
- if (fb->format->format != DRM_FORMAT_XRGB8888)
- return -EINVAL;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible)
+ return 0;
if (fb->pitches[0] * fb->height > hv->fb_size) {
drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
@@ -132,53 +159,120 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
return 0;
}
-static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void hyperv_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_rect rect;
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_rect damage;
+ struct drm_rect dst_clip;
+ struct drm_atomic_helper_damage_iter iter;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = new_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ hyperv_blit_to_vram_rect(new_state->fb, &shadow_plane_state->data[0], &damage);
+ hyperv_update_dirt(hv->hdev, &damage);
+ }
+}
- if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
- hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect);
- hyperv_update_dirt(hv->hdev, &rect);
+static int hyperv_plane_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
}
+ return -ENODEV;
}
-static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
- .enable = hyperv_pipe_enable,
- .check = hyperv_pipe_check,
- .update = hyperv_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static void hyperv_plane_panic_flush(struct drm_plane *plane)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_rect rect;
+
+ if (!plane->state || !plane->state->fb)
+ return;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ hyperv_update_dirt(hv->hdev, &rect);
+}
+
+static const struct drm_plane_helper_funcs hyperv_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = hyperv_plane_atomic_check,
+ .atomic_update = hyperv_plane_atomic_update,
+ .get_scanout_buffer = hyperv_plane_get_scanout_buffer,
+ .panic_flush = hyperv_plane_panic_flush,
};
-static const uint32_t hyperv_formats[] = {
- DRM_FORMAT_XRGB8888,
+static const struct drm_plane_funcs hyperv_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static const uint64_t hyperv_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const struct drm_encoder_funcs hyperv_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
};
static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
+ struct drm_device *dev = &hv->dev;
+ struct drm_encoder *encoder = &hv->encoder;
+ struct drm_plane *plane = &hv->plane;
+ struct drm_crtc *crtc = &hv->crtc;
+ struct drm_connector *connector = &hv->connector;
int ret;
- ret = drm_simple_display_pipe_init(&hv->dev,
- &hv->pipe,
- &hyperv_pipe_funcs,
- hyperv_formats,
- ARRAY_SIZE(hyperv_formats),
- hyperv_modifiers,
- &hv->connector);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &hyperv_plane_funcs,
+ hyperv_formats, ARRAY_SIZE(hyperv_formats),
+ hyperv_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(plane, &hyperv_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(plane);
- drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &hyperv_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &hyperv_crtc_helper_funcs);
- return 0;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder,
+ &hyperv_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialized connector.\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(connector, encoder);
}
static enum drm_mode_status
@@ -221,12 +315,6 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
dev->mode_config.funcs = &hyperv_mode_config_funcs;
- ret = hyperv_conn_init(hv);
- if (ret) {
- drm_err(dev, "Failed to initialized connector.\n");
- return ret;
- }
-
ret = hyperv_pipe_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized pipe.\n");
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e153686256c9..853543443072 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,12 +40,11 @@ i915-y += \
intel_pcode.o \
intel_region_ttm.o \
intel_runtime_pm.o \
- intel_sbi.o \
intel_step.o \
intel_uncore.o \
intel_uncore_trace.o \
intel_wakeref.o \
- vlv_sideband.o \
+ vlv_iosf_sb.o \
vlv_suspend.o
# core peripheral code
@@ -219,12 +218,11 @@ i915-$(CONFIG_HWMON) += \
# modesetting core code
i915-y += \
display/hsw_ips.o \
- display/i9xx_plane.o \
display/i9xx_display_sr.o \
+ display/i9xx_plane.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
- display/intel_atomic_plane.o \
display/intel_audio.o \
display/intel_bios.o \
display/intel_bo.o \
@@ -266,6 +264,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
+ display/intel_flipq.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
@@ -284,10 +283,12 @@ i915-y += \
display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
+ display/intel_plane.o \
display/intel_plane_initial.o \
display/intel_pmdemand.o \
display/intel_psr.o \
display/intel_quirks.o \
+ display/intel_sbi.o \
display/intel_sprite.o \
display/intel_sprite_uapi.o \
display/intel_tc.o \
@@ -296,7 +297,8 @@ i915-y += \
display/intel_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
- display/skl_watermark.o
+ display/skl_watermark.o \
+ display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index e0a98e6fd6d1..87f6b9602b16 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -18,6 +18,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 1d252432d729..2610f5702fb9 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -15,6 +15,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 4307e2ed03d9..927fe56aec77 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -5,11 +5,13 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -17,8 +19,6 @@
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val;
if (!crtc_state->ips_enabled)
@@ -39,8 +39,8 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
- val | IPS_PCODE_CONTROL));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL,
+ val | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -65,8 +65,6 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bool need_vblank_wait = false;
if (!crtc_state->ips_enabled)
@@ -74,7 +72,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
+ intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
@@ -267,7 +265,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
return PTR_ERR(cdclk_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100)
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
index 32abe9743014..935419441709 100644
--- a/drivers/gpu/drm/i915/display/i9xx_display_sr.c
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -5,10 +5,10 @@
#include <drm/drm_device.h>
-#include "i915_reg.h"
#include "i9xx_display_sr.h"
#include "i9xx_wm_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_gmbus.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index a2a6d52be0a5..f291ced989dc 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -2,6 +2,7 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
@@ -14,13 +15,15 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
/* Primary plane formats for gen <= 3 */
@@ -334,10 +337,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- i9xx_plane_has_windowing(plane));
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
if (ret)
return ret;
@@ -903,6 +906,27 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.format_mod_supported_async = intel_plane_format_mod_supported_async,
};
+static void i9xx_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 dspcntr;
+ u32 reg;
+
+ dspcntr = intel_de_read_fw(display, DSPCNTR(display, i9xx_plane));
+ dspcntr &= ~DISP_TILED;
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
+
+ if (DISPLAY_VER(display) >= 4) {
+ reg = intel_de_read_fw(display, DSPSURF(display, i9xx_plane));
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), reg);
+
+ } else {
+ reg = intel_de_read_fw(display, DSPADDR(display, i9xx_plane));
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), reg);
+ }
+}
+
struct intel_plane *
intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
{
@@ -1045,6 +1069,8 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
}
}
+ plane->disable_tiling = i9xx_disable_tiling;
+
modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X);
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
@@ -1149,7 +1175,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 77876ef735b7..1f9db5118777 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -11,6 +11,7 @@
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
#include "intel_mchbar_regs.h"
@@ -107,43 +108,41 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if (enable)
val &= ~FORCE_DDR_HIGH_FREQ;
else
val |= FORCE_DDR_HIGH_FREQ;
val &= ~FORCE_DDR_LOW_FREQ;
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
#define FW_WM(value, plane) \
@@ -3900,7 +3899,6 @@ static void g4x_wm_sanitize(struct intel_display *display)
static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -3911,9 +3909,9 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
wm->level = VLV_WM_LEVEL_PM2;
if (display->platform.cherryview) {
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -3926,23 +3924,23 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
* HIGH/LOW bits so that we don't actually change
* the current state.
*/
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+ vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
for_each_intel_crtc(display->drm, crtc) {
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ca7033251e91..8d9cb73a93a7 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -45,6 +45,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -192,12 +193,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
else
tmp &= ~PAYLOAD_PRESENT;
- tmp &= ~VBLANK_FENCE;
+ tmp &= ~(VBLANK_FENCE | LP_DATA_TRANSFER | PIPELINE_FLUSH);
if (enable_lpdt)
tmp |= LP_DATA_TRANSFER;
else
- tmp &= ~LP_DATA_TRANSFER;
+ tmp |= PIPELINE_FLUSH;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
@@ -658,7 +659,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy;
u32 val;
@@ -1276,6 +1277,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+ intel_panel_prepare(crtc_state, conn_state);
+
intel_crtc_vblank_on(crtc_state);
}
@@ -1409,6 +1412,8 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ intel_panel_unprepare(old_conn_state);
+
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
index d4845ac65acc..b601b7632339 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
@@ -272,6 +272,7 @@
#define PAYLOAD_PRESENT (1 << 31)
#define LP_DATA_TRANSFER (1 << 30)
#define VBLANK_FENCE (1 << 29)
+#define PIPELINE_FLUSH (1 << 28)
#define PARAM_WC_MASK (0xffff << 8)
#define PARAM_WC_LOWER_SHIFT 8
#define PARAM_WC_UPPER_SHIFT 16
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index c176bdbc19a3..dfdde8e4eabe 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -26,6 +26,13 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_psr_needs_alpm_aux_less(intel_dp, crtc_state) ||
+ (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp));
+}
+
void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -329,7 +336,6 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
@@ -341,30 +347,26 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((crtc_state->has_panel_replay && intel_dp_is_edp(intel_dp)) ||
- (crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
- intel_de_write(display,
- PORT_ALPM_CTL(port),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
- PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
- PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
- PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks));
-
- intel_de_write(display,
- PORT_ALPM_LFPS_CTL(port),
- PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
- PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms));
+ if (intel_dp->as_sdp_supported) {
+ u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP)
+ pr_alpm_ctl |= PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU;
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
+ DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR))
+ pr_alpm_ctl |= PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE;
+
+ intel_de_write(display, PR_ALPM_CTL(display, cpu_transcoder),
+ pr_alpm_ctl);
+ }
+
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
@@ -388,6 +390,36 @@ void intel_alpm_configure(struct intel_dp *intel_dp,
intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
}
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ u32 alpm_ctl_val = 0, lfps_ctl_val = 0;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (intel_alpm_is_alpm_aux_less(intel_dp, crtc_state)) {
+ alpm_ctl_val = PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ intel_dp->alpm_parameters.silence_period_sym_clocks);
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms);
+ }
+
+ intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
+
+ intel_de_write(display, PORT_ALPM_LFPS_CTL(port), lfps_ctl_val);
+}
+
void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index c9fe21e3e72c..a861c20b5d79 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -27,11 +27,15 @@ void intel_alpm_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+void intel_alpm_port_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_alpm_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_alpm_disable(struct intel_dp *intel_dp);
bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
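Splitting the PORT_ALPM programming into intel_alpm_port_configure() lets the port/PHY path program (or clear) those registers independently of the transcoder-level ALPM_CTL setup. A hedged caller sketch (example_port_enable is hypothetical):

static void example_port_enable(struct intel_dp *intel_dp,
                                const struct intel_crtc_state *crtc_state)
{
        /* Transcoder-level ALPM_CTL programming ... */
        intel_alpm_configure(intel_dp, crtc_state);

        /*
         * ... and the per-port AUX-less wake/LFPS registers, which are
         * now written unconditionally (zeroed when AUX-less ALPM is off).
         */
        intel_alpm_port_configure(intel_dp, crtc_state);
}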
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index e83feca5c9c9..348b1655435e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -26,7 +26,7 @@
*
* The functions here implement the state management and hardware programming
* dispatch required by the atomic modeset infrastructure.
- * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ * See intel_plane.c for the plane-specific atomic functionality.
*/
#include <drm/display/drm_dp_tunnel.h>
@@ -274,7 +274,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- crtc_state->dsb_color_vblank = NULL;
+ crtc_state->dsb_color = NULL;
crtc_state->dsb_commit = NULL;
crtc_state->use_dsb = false;
@@ -310,7 +310,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
- drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color);
drm_WARN_ON(crtc->dev, crtc_state->dsb_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 55af3a553c58..5bdaef38f13d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -951,7 +951,7 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+ intel_cdclk_force_min_cdclk(cdclk_state, enable ? 2 * 96000 : 0);
return drm_atomic_commit(&state->base);
}
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 5827da586003..e007380e9a63 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
#include <linux/string_helpers.h>
-
#include <acpi/video.h>
#include <drm/drm_file.h>
@@ -19,6 +18,7 @@
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 166ee11831ab..9c268bed091d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index fbd16d7b58d9..65d64f79a4bd 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_panic.h>
+#include "display/intel_display_types.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -57,3 +59,18 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
i915_debugfs_describe_obj(m, to_intel_bo(obj));
}
+
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+{
+ return i915_gem_object_alloc_framebuffer();
+}
+
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+{
+ return i915_gem_object_panic_setup(sb);
+}
+
+void intel_bo_panic_finish(struct intel_framebuffer *fb)
+{
+ return i915_gem_object_panic_finish(fb);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index ea7a2253aaa5..97087a64d23b 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
struct drm_gem_object;
+struct drm_scanout_buffer;
+struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -23,5 +25,8 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
+void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index a5dd2932b852..d29a755612de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,6 +5,8 @@
#include <drm/drm_atomic_state_helper.h>
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -12,10 +14,47 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "skl_watermark.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_uncore.h"
+#include "skl_watermark.h"
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+
+ /*
+ * Bit mask used to determine whether the corresponding
+ * pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /*
+ * From MTL onwards, to lock a QGV point, punit expects the peak BW of
+ * the selected QGV point as the parameter in multiples of 100MB/s
+ */
+ u16 qgv_point_peakbw;
+
+ /*
+ * Current QGV points mask, which restricts
+ * some particular SAGV states; not to be
+ * confused with pipe_sagv_mask.
+ */
+ u16 qgv_points_mask;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -79,14 +118,13 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
- ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
- &val, &val2);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
if (ret)
return ret;
@@ -107,13 +145,12 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display,
static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
struct intel_psf_gv_point *points)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -151,21 +188,20 @@ static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
ICL_PCODE_REQ_QGV_PT_MASK);
}
-int icl_pcode_restrict_qgv_points(struct intel_display *display,
- u32 points_mask)
+static int icl_pcode_restrict_qgv_points(struct intel_display *display,
+ u32 points_mask)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
- points_mask,
- ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
- ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
- 1);
+ ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
if (ret < 0) {
drm_err(display->drm,
@@ -218,11 +254,10 @@ intel_read_qgv_point_info(struct intel_display *display,
}
static int icl_get_qgv_points(struct intel_display *display,
+ const struct dram_info *dram_info,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- const struct dram_info *dram_info = &i915->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
@@ -418,19 +453,27 @@ static const struct intel_sa_info xe3lpd_sa_info = {
.derating = 10,
};
-static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe3lpd_3002_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 22, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(display, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
@@ -488,11 +531,11 @@ static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_
return 0;
}
-static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
+ const struct intel_sa_info *sa)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &i915->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
@@ -502,7 +545,7 @@ static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_
int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(display, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
@@ -632,15 +675,15 @@ static void dg2_get_bw_info(struct intel_display *display)
}
static int xe2_hpd_get_bw_info(struct intel_display *display,
+ const struct dram_info *dram_info,
const struct intel_sa_info *sa)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- int num_channels = i915->dram_info.num_channels;
+ int num_channels = dram_info->num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(display, &qi, true);
+ ret = icl_get_qgv_points(display, dram_info, &qi, true);
if (ret) {
drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
@@ -763,32 +806,34 @@ static unsigned int icl_qgv_bw(struct intel_display *display,
void intel_bw_init_hw(struct intel_display *display)
{
- const struct dram_info *dram_info = &to_i915(display->drm)->dram_info;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(display) >= 30)
- tgl_get_bw_info(display, &xe3lpd_sa_info);
+ if (DISPLAY_VERx100(display) >= 3002)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
+ else if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info);
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
- xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info);
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
else if (DISPLAY_VER(display) >= 14)
- tgl_get_bw_info(display, &mtl_sa_info);
+ tgl_get_bw_info(display, dram_info, &mtl_sa_info);
else if (display->platform.dg2)
dg2_get_bw_info(display);
else if (display->platform.alderlake_p)
- tgl_get_bw_info(display, &adlp_sa_info);
+ tgl_get_bw_info(display, dram_info, &adlp_sa_info);
else if (display->platform.alderlake_s)
- tgl_get_bw_info(display, &adls_sa_info);
+ tgl_get_bw_info(display, dram_info, &adls_sa_info);
else if (display->platform.rocketlake)
- tgl_get_bw_info(display, &rkl_sa_info);
+ tgl_get_bw_info(display, dram_info, &rkl_sa_info);
else if (DISPLAY_VER(display) == 12)
- tgl_get_bw_info(display, &tgl_sa_info);
+ tgl_get_bw_info(display, dram_info, &tgl_sa_info);
else if (DISPLAY_VER(display) == 11)
- icl_get_bw_info(display, &icl_sa_info);
+ icl_get_bw_info(display, dram_info, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -862,6 +907,11 @@ static unsigned int intel_bw_data_rate(struct intel_display *display,
return data_rate;
}
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_bw_state, base);
+}
+
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
@@ -971,6 +1021,70 @@ static void icl_force_disable_sagv(struct intel_display *display,
icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
+void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(display, new_mask);
+}
+
static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
@@ -991,7 +1105,7 @@ static int mtl_find_qgv_points(struct intel_display *display,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(display, new_bw_state)) {
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
@@ -1104,7 +1218,7 @@ static int icl_find_qgv_points(struct intel_display *display,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(display, new_bw_state)) {
+ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
@@ -1354,12 +1468,12 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
* requirements. This can reduce back and forth
* display blinking due to constant cdclk changes.
*/
- if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
+ if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
return 0;
drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, cdclk_state->bw_min_cdclk);
+ new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
*need_cdclk_calc = true;
return 0;
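/*
 * Illustration (hypothetical numbers, not part of the patch): if the
 * current bandwidth min cdclk is 480000 kHz and the new requirement drops
 * to 450000 kHz, the early return above keeps 480000 kHz and avoids a
 * down/up cdclk ping-pong on back-to-back plane updates; only a new
 * requirement above 480000 kHz sets *need_cdclk_calc.
 */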
@@ -1471,8 +1585,8 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
if (!new_bw_state)
return 0;
- if (intel_can_enable_sagv(display, new_bw_state) !=
- intel_can_enable_sagv(display, old_bw_state)) {
+ if (intel_bw_can_enable_sagv(display, new_bw_state) !=
+ intel_bw_can_enable_sagv(display, old_bw_state)) {
ret = intel_atomic_serialize_global_state(&new_bw_state->base);
if (ret)
return ret;
@@ -1518,8 +1632,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(display, old_bw_state) !=
- intel_can_enable_sagv(display, new_bw_state))
+ intel_bw_can_enable_sagv(display, old_bw_state) !=
+ intel_bw_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1641,3 +1755,32 @@ int intel_bw_init(struct intel_display *display)
return 0;
}
+
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_bw_state *new_bw_state, *old_bw_state;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (new_bw_state &&
+ new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
+ return true;
+
+ return false;
+}
+
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(display) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
+{
+ return bw_state->qgv_point_peakbw;
+}
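/*
 * Sketch of a consumer of the new accessors (hypothetical caller, not part
 * of the patch): with struct intel_bw_state now private to intel_bw.c,
 * outside code reads it only through helpers like the two above.
 * program_peak_bw() is a made-up stand-in for the pmdemand programming.
 */
static void example_bw_consumer(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *bw_state =
		intel_atomic_get_new_bw_state(state);

	if (!bw_state)
		return;

	if (intel_bw_can_enable_sagv(display, bw_state))
		program_peak_bw(intel_bw_qgv_point_peakbw(bw_state));
}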
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index eb2cc883e9c1..d51f50c9d302 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,52 +8,14 @@
#include <drm/drm_atomic.h>
-#include "intel_display_limits.h"
-#include "intel_display_power.h"
-#include "intel_global_state.h"
-
struct intel_atomic_state;
+struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
+struct intel_global_state;
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
-struct intel_bw_state {
- struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
-
- /*
- * Contains a bit mask, used to determine, whether correspondent
- * pipe allows SAGV or not.
- */
- u8 pipe_sagv_reject;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /*
- * From MTL onwards, to lock a QGV point, punit expects the peak BW of
- * the selected QGV point as the parameter in multiples of 100MB/s
- */
- u16 qgv_point_peakbw;
-
- /*
- * Current QGV points mask, which restricts
- * some particular SAGV states, not to confuse
- * with pipe_sagv_mask.
- */
- u16 qgv_points_mask;
-
- unsigned int data_rate[I915_MAX_PIPES];
- u8 num_active_planes[I915_MAX_PIPES];
-};
-
-#define to_intel_bw_state(global_state) \
- container_of_const((global_state), struct intel_bw_state, base)
+struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state);
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
@@ -67,8 +29,6 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct intel_display *display);
int intel_bw_init(struct intel_display *display);
int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
-int icl_pcode_restrict_qgv_points(struct intel_display *display,
- u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
int intel_bw_min_cdclk(struct intel_display *display,
@@ -76,4 +36,11 @@ int intel_bw_min_cdclk(struct intel_display *display,
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state);
+bool intel_bw_can_enable_sagv(struct intel_display *display,
+ const struct intel_bw_state *bw_state);
+void icl_sagv_pre_plane_update(struct intel_atomic_state *state);
+void icl_sagv_post_plane_update(struct intel_atomic_state *state);
+int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state);
+
#endif /* __INTEL_BW_H__ */
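/*
 * The header diff above follows the opaque-state pattern: the struct
 * definition moves into the .c file, and the header keeps only a forward
 * declaration plus accessor prototypes, so includers no longer pull in
 * intel_global_state.h or intel_display_limits.h. Minimal sketch of the
 * pattern with made-up names:
 */

/* foo.h */
struct foo_state;				/* opaque to users */
int foo_state_value(const struct foo_state *state);

/* foo.c */
struct foo_state {
	int value;				/* private detail */
};

int foo_state_value(const struct foo_state *state)
{
	return state->value;
}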
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b1718b491ffd..228aa64c1349 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -32,16 +32,17 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_watermark.h"
@@ -113,6 +114,42 @@
* dividers can be programmed correctly.
*/
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+ * Actual configuration of cdclk, can be different from the
+ * logical configuration only when all crtc's are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk to satisfy bandwidth requirements */
+ int bw_min_cdclk;
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
+};
+
struct intel_cdclk_funcs {
void (*get_cdclk)(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
@@ -567,20 +604,18 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
static void vlv_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ cdclk_config->vco = vlv_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_config->vco);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (display->platform.valleyview)
@@ -658,16 +693,16 @@ static void vlv_set_cdclk(struct intel_display *display,
*/
wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_iosf_sb_get(dev_priv,
+ vlv_iosf_sb_get(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
drm_err(display->drm,
@@ -681,12 +716,12 @@ static void vlv_set_cdclk(struct intel_display *display,
cdclk) - 1;
/* adjust cdclk divider */
- val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~CCK_FREQUENCY_VALUES;
val |= divider;
- vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
drm_err(display->drm,
@@ -694,7 +729,7 @@ static void vlv_set_cdclk(struct intel_display *display,
}
/* adjust self-refresh exit latency value */
- val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val = vlv_bunit_read(display->drm, BUNIT_REG_BISOC);
val &= ~0x7f;
/*
@@ -705,9 +740,9 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
- vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ vlv_bunit_write(display->drm, BUNIT_REG_BISOC, val);
- vlv_iosf_sb_put(dev_priv,
+ vlv_iosf_sb_put(display->drm,
BIT(VLV_IOSF_SB_CCK) |
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
@@ -723,7 +758,6 @@ static void chv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -747,19 +781,19 @@ static void chv_set_cdclk(struct intel_display *display,
*/
wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
- vlv_punit_get(dev_priv);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ vlv_punit_get(display->drm);
+ val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
drm_err(display->drm,
"timed out waiting for CDclk change\n");
}
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
intel_update_cdclk(display);
@@ -843,7 +877,6 @@ static void bdw_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int ret;
@@ -856,7 +889,7 @@ static void bdw_set_cdclk(struct intel_display *display,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
@@ -884,8 +917,8 @@ static void bdw_set_cdclk(struct intel_display *display,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(display->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1125,7 +1158,6 @@ static void skl_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
@@ -1142,10 +1174,10 @@ static void skl_set_cdclk(struct intel_display *display,
drm_WARN_ON_ONCE(display->drm,
display->platform.skylake && vco == 8640000);
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
@@ -1188,8 +1220,8 @@ static void skl_set_cdclk(struct intel_display *display,
intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
intel_update_cdclk(display);
}
@@ -2125,7 +2157,6 @@ static void bxt_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_config mid_cdclk_config;
int cdclk = cdclk_config->cdclk;
int ret = 0;
@@ -2139,18 +2170,18 @@ static void bxt_set_cdclk(struct intel_display *display,
if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
else
/*
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2);
if (ret) {
drm_err(display->drm,
@@ -2179,8 +2210,8 @@ static void bxt_set_cdclk(struct intel_display *display,
* Display versions 14 and beyond
*/;
else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
- ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified; the 2ms used here is based on
@@ -2188,10 +2219,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level,
- 150, 2);
+ ret = intel_pcode_write_timeout(display->drm,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level, 2);
}
if (ret) {
drm_err(display->drm,
@@ -2477,7 +2507,6 @@ static void intel_pcode_notify(struct intel_display *display,
bool cdclk_update_valid,
bool pipe_count_update_valid)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 update_mask = 0;
@@ -2492,11 +2521,11 @@ static void intel_pcode_notify(struct intel_display *display,
if (pipe_count_update_valid)
update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
- ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE |
- update_mask,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
@@ -3388,7 +3417,9 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VERx100(display) >= 3002) {
+ display->cdclk.max_cdclk_freq = 480000;
+ } else if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
} else if (display->platform.jasperlake || display->platform.elkhartlake) {
if (display->cdclk.hw.ref == 24000)
@@ -3528,10 +3559,8 @@ static int pch_rawclk(struct intel_display *display)
static int vlv_hrawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ return vlv_get_cck_clock_hpll(display->drm, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
@@ -3841,3 +3870,60 @@ void intel_init_cdclk_hooks(struct intel_display *display)
"Unknown platform. Assuming i830\n"))
display->funcs.cdclk = &i830_cdclk_funcs;
}
+
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->logical.cdclk;
+}
+
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.cdclk;
+}
+
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->actual.voltage_level;
+}
+
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe)
+{
+ return cdclk_state->min_cdclk[pipe];
+}
+
+int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state)
+{
+ return cdclk_state->bw_min_cdclk;
+}
+
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
+
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk ||
+ new_cdclk_state->actual.voltage_level != old_cdclk_state->actual.voltage_level))
+ return true;
+
+ return false;
+}
+
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk)
+{
+ cdclk_state->force_min_cdclk = force_min_cdclk;
+}
+
+void intel_cdclk_read_hw(struct intel_display *display)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
+
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
+ cdclk_state->actual = display->cdclk.hw;
+ cdclk_state->logical = display->cdclk.hw;
+}
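/*
 * Sketch (hypothetical caller, not part of the patch): paired with
 * intel_bw_pmdemand_needs_update(), the helper above lets the pmdemand
 * code decide whether a new PM demand request is needed without reaching
 * into either private state struct.
 */
static bool example_pmdemand_needs_update(struct intel_atomic_state *state)
{
	return intel_cdclk_pmdemand_needs_update(state) ||
	       intel_bw_pmdemand_needs_update(state);
}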
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index a1cefd455d92..cacee598af0e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,10 +8,9 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-
+enum pipe;
struct intel_atomic_state;
+struct intel_cdclk_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
@@ -23,42 +22,6 @@ struct intel_cdclk_config {
bool joined_mbus;
};
-struct intel_cdclk_state {
- struct intel_global_state base;
-
- /*
- * Logical configuration of cdclk (used for all scaling,
- * watermark, etc. calculations and checks). This is
- * computed as if all enabled crtcs were active.
- */
- struct intel_cdclk_config logical;
-
- /*
- * Actual configuration of cdclk, can be different from the
- * logical configuration only when all crtc's are DPMS off.
- */
- struct intel_cdclk_config actual;
-
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
-
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
-
- /* forced minimum cdclk for glk+ audio w/a */
- int force_min_cdclk;
-
- /* bitmask of active pipes */
- u8 active_pipes;
-
- /* update cdclk with pipes disabled */
- bool disable_pipes;
-};
-
void intel_cdclk_init_hw(struct intel_display *display);
void intel_cdclk_uninit_hw(struct intel_display *display);
void intel_init_cdclk_hooks(struct intel_display *display);
@@ -97,4 +60,13 @@ void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
int intel_cdclk_init(struct intel_display *display);
void intel_cdclk_debugfs_register(struct intel_display *display);
+int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
+int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
+int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state);
+bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
+void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
+void intel_cdclk_read_hw(struct intel_display *display);
+
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
index 82606ebae1de..165138b95cb2 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg.c
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -9,13 +9,13 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
-#include "intel_crtc.h"
#include "intel_cmtg.h"
#include "intel_cmtg_regs.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
/**
* DOC: Common Primary Timing Generator (CMTG)
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
index 668e41d65e86..945a35578284 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_CMTG_REGS_H__
#define __INTEL_CMTG_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define CMTG_CLK_SEL _MMIO(0x46160)
#define CMTG_CLK_SEL_A_MASK REG_GENMASK(31, 29)
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 98dddf72c0eb..671db6926e4c 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1339,8 +1339,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1350,8 +1350,8 @@ static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
- intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
+ if (crtc_state->dsb_color)
+ intel_dsb_reg_write_indexed(crtc_state->dsb_color, reg, val);
else
intel_de_write_fw(display, reg, val);
}
@@ -1389,7 +1389,7 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
for (i = 0; i < 256; i++) {
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1917,7 +1917,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (crtc_state->dsb_color_vblank)
+ if (crtc_state->dsb_color)
return;
display->funcs.color->load_luts(crtc_state);
@@ -1965,6 +1965,25 @@ void intel_color_modeset(const struct intel_crtc_state *crtc_state)
}
}
+bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->dsb_color;
+}
+
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && !HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->dsb_color && HAS_DOUBLE_BUFFERED_LUT(display);
+}
+
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1982,47 +2001,53 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
- crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
- if (!crtc_state->dsb_color_vblank)
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
+ else
+ crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
+
+ if (!intel_color_uses_dsb(crtc_state))
return;
display->funcs.color->load_luts(crtc_state);
- if (crtc_state->use_dsb) {
- intel_vrr_send_push(crtc_state->dsb_color_vblank, crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
- intel_vrr_check_push_sent(crtc_state->dsb_color_vblank, crtc_state);
- intel_dsb_interrupt(crtc_state->dsb_color_vblank);
+ if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
+ intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
+ intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
+ intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
+ intel_dsb_interrupt(crtc_state->dsb_color);
}
- intel_dsb_finish(crtc_state->dsb_color_vblank);
+ if (intel_color_uses_gosub_dsb(crtc_state))
+ intel_dsb_gosub_finish(crtc_state->dsb_color);
+ else
+ intel_dsb_finish(crtc_state->dsb_color);
}
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank) {
- intel_dsb_cleanup(crtc_state->dsb_color_vblank);
- crtc_state->dsb_color_vblank = NULL;
+ if (crtc_state->dsb_color) {
+ intel_dsb_cleanup(crtc_state->dsb_color);
+ crtc_state->dsb_color = NULL;
}
}
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_vblank)
- intel_dsb_wait(crtc_state->dsb_color_vblank);
-}
-
-bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->dsb_color_vblank;
+ if (crtc_state->dsb_color)
+ intel_dsb_wait(crtc_state->dsb_color);
}
static bool intel_can_preload_luts(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ if (HAS_DOUBLE_BUFFERED_LUT(display))
+ return false;
+
return !old_crtc_state->post_csc_lut &&
!old_crtc_state->pre_csc_lut;
}
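/*
 * Sketch (illustration, not part of the patch) of the three color-commit
 * modes the helpers above distinguish: with double-buffered LUTs the color
 * DSB is prepared as a GOSUB subroutine of the main DSB, otherwise as a
 * separate chained DSB run in the vblank window, and with no dsb_color the
 * LUTs are loaded directly via MMIO.
 */
static const char *example_color_commit_mode(const struct intel_crtc_state *crtc_state)
{
	if (intel_color_uses_gosub_dsb(crtc_state))
		return "gosub: INTEL_DSB_0 subroutine, double-buffered LUT";
	if (intel_color_uses_chained_dsb(crtc_state))
		return "chained: INTEL_DSB_1, executed in the vblank window";
	return "mmio: no DSB prepared for this commit";
}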
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 9d66457c1e89..bf7a12ce9df0 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -24,6 +24,8 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state);
+bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state);
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index f5cc38dbe559..112749f97c26 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#define for_each_combo_phy(__display, __phy) \
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index ee41acdccf4e..3694f95376c2 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_COMBO_PHY_REGS__
#define __INTEL_COMBO_PHY_REGS__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 6c81c9f2fd09..42c923f416b3 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -32,7 +32,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
@@ -65,10 +64,10 @@ static void intel_connector_modeset_retry_work_fn(struct work_struct *work)
void intel_connector_queue_modeset_retry_work(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+ if (!queue_work(display->wq.unordered, &connector->modeset_retry_work))
drm_connector_put(&connector->base);
}
@@ -153,36 +152,36 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-int intel_connector_register(struct drm_connector *connector)
+int intel_connector_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
- ret = intel_backlight_device_register(intel_connector);
+ ret = intel_panel_register(connector);
if (ret)
goto err;
if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
- goto err_backlight;
+ goto err_panel;
}
- intel_connector_debugfs_add(intel_connector);
+ intel_connector_debugfs_add(connector);
return 0;
-err_backlight:
- intel_backlight_device_unregister(intel_connector);
+err_panel:
+ intel_panel_unregister(connector);
err:
return ret;
}
-void intel_connector_unregister(struct drm_connector *connector)
+void intel_connector_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_backlight_device_unregister(intel_connector);
+ intel_panel_unregister(connector);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -209,8 +208,7 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(display->drm,
- !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
if (!connector->base.state->crtc)
return INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 38b50a779b6b..898c5d9e8f7a 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -34,8 +34,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -44,6 +42,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 29cfc38f12e0..a187db6df2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -17,7 +17,6 @@
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
@@ -29,6 +28,7 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
@@ -417,10 +417,13 @@ int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
+
return crtc_state->hw.active &&
!crtc_state->preload_luts &&
!intel_crtc_needs_modeset(crtc_state) &&
- intel_crtc_needs_color_update(crtc_state) &&
+ (intel_crtc_needs_color_update(crtc_state) &&
+ !HAS_DOUBLE_BUFFERED_LUT(display)) &&
!intel_color_uses_dsb(crtc_state) &&
!crtc_state->use_dsb;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2fec5ba58373..198e69efe9ac 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -12,10 +12,8 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
@@ -24,6 +22,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_vblank.h"
@@ -159,10 +158,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index a82b93cbc81d..ed8e640b96b0 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -8,8 +8,8 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
+#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -39,7 +39,13 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- if (display->platform.pantherlake && phy == PHY_A)
+ /*
+ * PTL doesn't have a PHY connected to PORT B; as such,
+ * there will never be a case where PTL uses PHY B.
+ * WCL uses PORT A and B with the C10 PHY.
+ * Reusing the condition for WCL and extending it for PORT B
+ * should not cause any issues for PTL.
+ */
+ if (display->platform.pantherlake && phy < PHY_C)
return true;
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
@@ -3224,6 +3230,37 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
intel_cx0pll_enable(encoder, crtc_state);
}
+/*
+ * According to HAS we need to enable MAC Transmitting LFPS in the "PHY Common
+ * Control 0" PIPE register when AUX-Less ALPM is going to be used. This
+ * function does that and is called from the link retrain sequence.
+ */
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+ bool enable = intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder),
+ crtc_state);
+ int i;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ for (i = 0; i < 4; i++) {
+ int tx = i % 2 + 1;
+ u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
+
+ if (!(owned_lane_mask & lane_mask))
+ continue;
+
+ intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
+ CONTROL0_MAC_TRANSMIT_LFPS,
+ enable ? CONTROL0_MAC_TRANSMIT_LFPS : 0,
+ MB_WRITE_COMMITTED);
+ }
+}
+
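/*
 * Mapping used by the lane loop above (as the i % 2 and i < 2 arithmetic
 * implies, assuming two lanes with two transmitters each):
 *
 *   i   tx   lane_mask
 *   0    1   INTEL_CX0_LANE0
 *   1    2   INTEL_CX0_LANE0
 *   2    1   INTEL_CX0_LANE1
 *   3    2   INTEL_CX0_LANE1
 */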
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index a8f811ca5e7b..c5a7b529955b 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -43,5 +43,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
+void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 59c22beaf1de..77eae1d845f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_CX0_PHY_REGS_H__
#define __INTEL_CX0_PHY_REGS_H__
-#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+#include "intel_display_reg_defs.h"
/* DDI Buffer Control */
#define _DDI_CLK_VALFREQ_A 0x64030
@@ -285,6 +285,9 @@
#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control))
#define CONTROL2_DISABLE_SINGLE_TX REG_BIT(6)
+#define PHY_CMN1_CONTROL(tx, control) (0x800 + ((tx) - 1) * 0x200 + (control))
+#define CONTROL0_MAC_TRANSMIT_LFPS REG_BIT(1)
+
/* C20 Registers */
#define PHY_C20_WR_ADDRESS_L 0xC02
#define PHY_C20_WR_ADDRESS_H 0xC03
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d58f8fc37326..0405396c7750 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -50,6 +50,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -72,11 +73,13 @@
#include "intel_lspcon.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
+#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
@@ -236,7 +239,7 @@ static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
port_name(port));
}
-static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@@ -260,7 +263,7 @@ static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
@@ -1393,6 +1396,21 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
for (ln = 0; ln < 2; ln++) {
int level;
+ /* Wa_16011342517:adl-p */
+ if (display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) {
+ if ((intel_encoder_is_hdmi(encoder) &&
+ crtc_state->port_clock == 594000) ||
+ (intel_encoder_is_dp(encoder) &&
+ crtc_state->port_clock == 162000)) {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 1);
+ } else {
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
+ LOADGEN_SHARING_PMD_DISABLE, 0);
+ }
+ }
+
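/*
 * Wa_16011342517 summary (as implemented above): on ADL-P steppings
 * A0..D0 only, LOADGEN_SHARING_PMD_DISABLE is set for HDMI at a 594000 kHz
 * port clock or DP at 162000 kHz, and explicitly cleared for every other
 * mode so a previously applied setting does not stick.
 */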
intel_dkl_phy_write(display, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
@@ -1561,7 +1579,7 @@ static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t
return !(intel_de_read(display, reg) & clk_off);
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
_icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel_shift)
{
@@ -1569,14 +1587,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
id = (intel_de_read(display, reg) & clk_sel_mask) >> clk_sel_shift;
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1606,7 +1624,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1620,7 +1638,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1650,7 +1668,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1664,7 +1682,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1703,7 +1721,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1723,14 +1741,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
if (phy >= PHY_C)
id += DPLL_ID_DG1_DPLL2;
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(display->drm, !pll))
@@ -1760,7 +1778,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -1774,7 +1792,7 @@ static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
if (drm_WARN_ON(display->drm, !pll))
@@ -1817,7 +1835,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
@@ -1868,7 +1886,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
-static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
@@ -1895,10 +1913,10 @@ static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encode
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
-static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder->base.dev);
enum intel_dpll_id id;
@@ -1918,14 +1936,14 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
if (drm_WARN_ON(display->drm, !pll))
@@ -1967,7 +1985,7 @@ static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
-static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -1986,14 +2004,14 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
enum port port = encoder->port;
if (drm_WARN_ON(display->drm, !pll))
@@ -2018,7 +2036,7 @@ bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
-static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+static struct intel_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2053,7 +2071,7 @@ static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(display, id);
+ return intel_get_dpll_by_id(display, id);
}
void intel_ddi_enable_clock(struct intel_encoder *encoder,
@@ -2760,7 +2778,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 4. Enable the port PLL.
*
* The PLL enabling itself was already done before this function by
- * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * hsw_crtc_enable()->intel_enable_dpll(). We need only
* configure the PLL to port mapping here.
*/
intel_ddi_enable_clock(encoder, crtc_state);
@@ -3354,6 +3372,8 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
drm_connector_update_privacy_screen(conn_state);
intel_edp_backlight_on(crtc_state, conn_state);
+ intel_panel_prepare(crtc_state, conn_state);
+
if (!intel_lspcon_active(dig_port) || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
@@ -3551,6 +3571,7 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
intel_dp->link.active = false;
+ intel_panel_unprepare(old_conn_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
@@ -3647,7 +3668,7 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
- intel_update_active_dpll(state, pipe_crtc, encoder);
+ intel_dpll_update_active(state, pipe_crtc, encoder);
}
/*
@@ -3740,6 +3761,18 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_ddi_buf_enable(encoder, intel_dp->DP);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+
+ /*
+ * 6.k If AUX-Less ALPM is going to be enabled:
+ * i. Configure PORT_ALPM_CTL and PORT_ALPM_LFPS_CTL here
+ */
+ intel_alpm_port_configure(intel_dp, crtc_state);
+
+ /*
+ * ii. Enable MAC Transmits LFPS in the "PHY Common Control 0" PIPE
+ * register
+ */
+ intel_lnl_mac_transmit_lfps(encoder, crtc_state);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -4184,7 +4217,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
@@ -4200,7 +4233,7 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4254,7 +4287,7 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
-static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
@@ -4264,7 +4297,7 @@ icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ const struct intel_dpll *pll = crtc_state->intel_dpll;
if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
@@ -4287,7 +4320,7 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id;
@@ -4310,10 +4343,10 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->intel_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(display, encoder->port);
else
- crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 353eb04079e9..f6f511bb0431 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -16,9 +16,9 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dp;
+struct intel_dpll;
struct intel_dpll_hw_state;
struct intel_encoder;
-struct intel_shared_dpll;
enum pipe;
enum port;
enum transcoder;
@@ -40,7 +40,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder,
void intel_ddi_disable_clock(struct intel_encoder *encoder);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
@@ -50,7 +50,7 @@ intel_ddi_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+struct intel_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 54ce3e4f8fd9..9ecdcf6b73e4 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -107,10 +107,10 @@ intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
static inline int
__intel_de_wait_for_register_nowl(struct intel_display *display,
i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms)
{
return intel_wait_for_register(__to_uncore(display), reg, mask,
- value, timeout);
+ value, timeout_ms);
}
static inline int
@@ -125,14 +125,14 @@ __intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
static inline int
intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
- timeout);
+ timeout_ms);
intel_dmc_wl_put(display, reg);
@@ -141,14 +141,14 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg,
static inline int
intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+ u32 mask, u32 value, unsigned int timeout_ms, u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
- value, timeout);
+ value, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
@@ -176,16 +176,16 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
static inline int
intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, mask, timeout);
+ return intel_de_wait(display, reg, mask, mask, timeout_ms);
}
static inline int
intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, 0, timeout);
+ return intel_de_wait(display, reg, mask, 0, timeout_ms);
}
/*
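/*
 * Usage sketch (hypothetical caller, not part of the patch) of the renamed
 * wait helpers: the _ms suffix documents the timeout unit, and
 * intel_de_wait_fw() can now hand the final register value back to its
 * caller.
 */
static int example_wait_ready(struct intel_display *display, i915_reg_t reg)
{
	u32 val;
	int ret;

	/* wait up to 10 ms for bit 0 to assert, reporting the last value */
	ret = intel_de_wait_fw(display, reg, BIT(0), BIT(0), 10, &val);
	if (ret)
		drm_err(display->drm, "wait timed out, reg value 0x%x\n", val);

	return ret;
}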
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 43aa1f97378b..7035c1fc9033 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -57,7 +57,6 @@
#include "i9xx_wm.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
@@ -67,13 +66,14 @@
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
-#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -93,6 +93,7 @@
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
+#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
@@ -105,9 +106,9 @@
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
-#include "intel_pcode.h"
#include "intel_pfit.h"
#include "intel_pipe_crc.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
@@ -140,46 +141,47 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+int vlv_get_hpll_vco(struct drm_device *drm)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
- hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
return vco_freq[hpll_freq] * 1000;
}
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock(struct drm_device *drm,
const char *name, u32 reg, int ref_freq)
{
u32 val;
int divider;
- val = vlv_cck_read(dev_priv, reg);
+ val = vlv_cck_read(drm, reg);
divider = val & CCK_FREQUENCY_VALUES;
- drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_device *drm,
const char *name, u32 reg)
{
+ struct drm_i915_private *dev_priv = to_i915(drm);
int hpll;
- vlv_cck_get(dev_priv);
+ vlv_cck_get(drm);
if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
+ dev_priv->hpll_freq = vlv_get_hpll_vco(drm);
- hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+ hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);
- vlv_cck_put(dev_priv);
+ vlv_cck_put(drm);
return hpll;
}
@@ -191,7 +193,7 @@ void intel_update_czclk(struct intel_display *display)
if (!display->platform.valleyview && !display->platform.cherryview)
return;
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+ dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
CCK_CZ_CLOCK_CONTROL);
drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
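The VLV/CHV clock helpers above now take struct drm_device directly, matching the reworked vlv_cck_*() sideband API; only vlv_get_cck_clock_hpll() still resolves drm_i915_private, and only to cache hpll_freq. The resulting call pattern, taken from the czclk update in this hunk:

	/* read the CZ clock through the drm_device-based sideband helpers */
	int czclk_khz = vlv_get_cck_clock_hpll(display->drm, "czclk",
					       CCK_CZ_CLOCK_CONTROL);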
@@ -1325,7 +1327,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
- new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->intel_dpll = old_crtc_state->intel_dpll;
new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
}
}
@@ -1658,13 +1660,17 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(display->drm, crtc->active))
return;
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
- intel_dmc_enable_pipe(display, pipe_crtc->pipe);
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
+ const struct intel_crtc_state *new_pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_dmc_enable_pipe(new_pipe_crtc_state);
+ }
intel_encoders_pre_pll_enable(state, crtc);
- if (new_crtc_state->shared_dpll)
- intel_enable_shared_dpll(new_crtc_state);
+ if (new_crtc_state->intel_dpll)
+ intel_dpll_enable(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -1793,12 +1799,16 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- intel_disable_shared_dpll(old_crtc_state);
+ intel_dpll_disable(old_crtc_state);
intel_encoders_post_pll_disable(state, crtc);
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i)
- intel_dmc_disable_pipe(display, pipe_crtc->pipe);
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_dmc_disable_pipe(old_pipe_crtc_state);
+ }
}
/* Prefer intel_encoder_is_combo() */
@@ -1959,7 +1969,7 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
if (HAS_DDI(display) && crtc_state->has_audio)
set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
- if (crtc_state->shared_dpll)
+ if (crtc_state->intel_dpll)
set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
if (crtc_state->dsc.compression_enable)
@@ -4159,7 +4169,7 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
return 0;
linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
- cdclk_state->logical.cdclk);
+ intel_cdclk_logical(cdclk_state));
return min(linetime_wm, 0x1ff);
}
@@ -4225,7 +4235,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (intel_crtc_needs_modeset(crtc_state)) {
- ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
+ ret = intel_dpll_crtc_get_dpll(state, crtc);
if (ret)
return ret;
}
@@ -4318,6 +4328,22 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
return 0;
}
+int intel_display_min_pipe_bpp(void)
+{
+ return 6 * 3;
+}
+
+int intel_display_max_pipe_bpp(struct intel_display *display)
+{
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
+ return 10*3;
+ else if (DISPLAY_VER(display) >= 5)
+ return 12*3;
+ else
+ return 8*3;
+}
+
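intel_display_min_pipe_bpp() and intel_display_max_pipe_bpp() lift the platform bpp bounds out of compute_baseline_pipe_bpp() so other code can reuse them. A hedged sketch of the intended use, clamping a requested bpp into the pipe's supported range (the clamp call site is illustrative, not part of this patch):

	/* illustrative: keep a requested bpp within what the pipe supports */
	int bpp = clamp(requested_bpp,
			intel_display_min_pipe_bpp(),
			intel_display_max_pipe_bpp(display));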
static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -4327,17 +4353,9 @@ compute_baseline_pipe_bpp(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int bpp, i;
-
- if (display->platform.g4x || display->platform.valleyview ||
- display->platform.cherryview)
- bpp = 10*3;
- else if (DISPLAY_VER(display) >= 5)
- bpp = 12*3;
- else
- bpp = 8*3;
+ int i;
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = intel_display_max_pipe_bpp(display);
/* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
@@ -4501,7 +4519,7 @@ copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
/* preserve some things from the slave's original crtc state */
saved_state->uapi = secondary_crtc_state->uapi;
saved_state->scaler_state = secondary_crtc_state->scaler_state;
- saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
+ saved_state->intel_dpll = secondary_crtc_state->intel_dpll;
saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
intel_crtc_free_hw_state(secondary_crtc_state);
@@ -4564,7 +4582,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
saved_state->uapi = crtc_state->uapi;
saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
- saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->intel_dpll = crtc_state->intel_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
sizeof(saved_state->icl_port_dplls));
@@ -5318,7 +5336,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
if (display->dpll.mgr)
- PIPE_CONF_CHECK_P(shared_dpll);
+ PIPE_CONF_CHECK_P(intel_dpll);
/* FIXME convert everything over the dpll_mgr */
if (display->dpll.mgr || HAS_GMCH(display))
@@ -5470,7 +5488,7 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
@@ -6186,7 +6204,7 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, crtc);
+ ret = intel_plane_add_affected(state, crtc);
if (ret)
return ret;
}
@@ -6428,7 +6446,7 @@ int intel_atomic_check(struct drm_device *dev,
any_ms = true;
- intel_release_shared_dplls(state, crtc);
+ intel_dpll_release(state, crtc);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -6438,7 +6456,7 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = intel_atomic_check_planes(state);
+ ret = intel_plane_atomic_check(state);
if (ret)
goto fail;
@@ -6602,7 +6620,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* During modesets pipe configuration was programmed as the
@@ -6630,18 +6648,24 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq);
/*
* Disable the scaler(s) after the plane(s) so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*/
- if (DISPLAY_VER(display) >= 9 &&
- !intel_crtc_needs_modeset(new_crtc_state))
+ if (DISPLAY_VER(display) >= 9 && !modeset)
skl_detach_scalers(NULL, new_crtc_state);
+ if (!modeset &&
+ intel_crtc_needs_color_update(new_crtc_state) &&
+ !intel_color_uses_dsb(new_crtc_state) &&
+ HAS_DOUBLE_BUFFERED_LUT(display))
+ intel_color_load_luts(new_crtc_state);
+
if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
}
@@ -6715,10 +6739,10 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state) &&
- !new_crtc_state->use_dsb)
+ !new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_color_commit_noarm(NULL, new_crtc_state);
- if (!new_crtc_state->use_dsb)
+ if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq)
intel_crtc_planes_update_noarm(NULL, state, crtc);
}
@@ -6730,16 +6754,23 @@ static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq) {
+ intel_flipq_enable(new_crtc_state);
+
+ intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event);
+
+ intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
+ new_crtc_state->dsb_commit);
+ } else if (new_crtc_state->use_dsb) {
intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
} else {
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
if (new_crtc_state->dsb_commit)
- intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ intel_dsb_commit(new_crtc_state->dsb_commit);
commit_pipe_pre_planes(state, crtc);
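With flip queue in play, intel_update_crtc() now dispatches between three commit mechanisms. A condensed restatement of the branch above (vblank-event plumbing and the pre/post-plane steps elided):

	/* condensed dispatch, mirroring the hunk above */
	static void example_commit_dispatch(struct intel_atomic_state *state,
					    struct intel_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
	{
		if (crtc_state->use_flipq) {
			/* DMC flip queue executes the prebuilt DSB at flip time */
			intel_flipq_enable(crtc_state);
			intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0,
					crtc_state->dsb_commit);
		} else if (crtc_state->use_dsb) {
			/* DSB engine executes the commit asynchronously */
			intel_dsb_commit(crtc_state->dsb_commit);
		} else {
			/* CPU programs registers under vblank evasion */
			intel_pipe_update_start(state, crtc);
		}
	}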
@@ -7169,7 +7200,17 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
return;
/* FIXME deal with everything */
+ new_crtc_state->use_flipq =
+ intel_flipq_supported(display) &&
+ !new_crtc_state->do_async_flip &&
+ !new_crtc_state->vrr.enable &&
+ !new_crtc_state->has_psr &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state) &&
+ !intel_crtc_needs_color_update(new_crtc_state);
+
new_crtc_state->use_dsb =
+ !new_crtc_state->use_flipq &&
!new_crtc_state->do_async_flip &&
(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
!intel_crtc_needs_modeset(new_crtc_state) &&
@@ -7185,7 +7226,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
+ if (!new_crtc_state->use_flipq &&
+ !new_crtc_state->use_dsb &&
+ !new_crtc_state->dsb_color)
return;
/*
@@ -7194,14 +7237,20 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
* Double that for pipe stuff and other overhead.
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
- new_crtc_state->use_dsb ? 1024 : 16);
+ new_crtc_state->use_dsb ||
+ new_crtc_state->use_flipq ? 1024 : 16);
if (!new_crtc_state->dsb_commit) {
+ new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;
intel_color_cleanup_commit(new_crtc_state);
return;
}
- if (new_crtc_state->use_dsb) {
+ if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) {
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc);
+
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state->dsb_commit,
new_crtc_state);
@@ -7216,7 +7265,8 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
- intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
+ if (new_crtc_state->use_dsb)
+ intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(new_crtc_state->dsb_commit,
@@ -7232,19 +7282,27 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
skl_detach_scalers(new_crtc_state->dsb_commit,
new_crtc_state);
- if (!new_crtc_state->dsb_color_vblank) {
- intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
-
- intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
- intel_vrr_check_push_sent(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_interrupt(new_crtc_state->dsb_commit);
- }
+ /* Wa_18034343758 */
+ if (new_crtc_state->use_flipq)
+ intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc);
}
- if (new_crtc_state->dsb_color_vblank)
+ if (intel_color_uses_chained_dsb(new_crtc_state))
intel_dsb_chain(state, new_crtc_state->dsb_commit,
- new_crtc_state->dsb_color_vblank, true);
+ new_crtc_state->dsb_color, true);
+ else if (intel_color_uses_gosub_dsb(new_crtc_state))
+ intel_dsb_gosub(new_crtc_state->dsb_commit,
+ new_crtc_state->dsb_color);
+
+ if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
+ intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+ intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ intel_dsb_interrupt(new_crtc_state->dsb_commit);
+ }
intel_dsb_finish(new_crtc_state->dsb_commit);
}
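The tail of intel_atomic_dsb_finish() now attaches the color DSB in one of two ways, and only the plain-DSB path keeps the vblank wait/push/interrupt sequence. Condensed from the hunk above:

	/* how the color DSB rides along with the commit DSB */
	if (intel_color_uses_chained_dsb(crtc_state))
		intel_dsb_chain(state, crtc_state->dsb_commit,
				crtc_state->dsb_color, true);	/* separate chained DSB */
	else if (intel_color_uses_gosub_dsb(crtc_state))
		intel_dsb_gosub(crtc_state->dsb_commit,
				crtc_state->dsb_color);		/* subroutine within the commit DSB */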
@@ -7367,6 +7425,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
display->funcs.display->commit_modeset_enables(state);
+ /* FIXME probably need to sequence this properly */
intel_program_dpkgc_latency(state);
intel_wait_for_vblank_workers(state);
@@ -7390,6 +7449,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
intel_vrr_check_push_sent(NULL, new_crtc_state);
+
+ if (new_crtc_state->use_flipq)
+ intel_flipq_disable(new_crtc_state);
}
/*
@@ -7433,7 +7495,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* FIXME get rid of this funny new->old swapping
*/
- old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
+ old_crtc_state->dsb_color = fetch_and_zero(&new_crtc_state->dsb_color);
old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
}
@@ -7526,7 +7588,7 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
intel_atomic_swap_global_state(state);
- intel_shared_dpll_swap_state(state);
+ intel_dpll_swap_state(state);
intel_atomic_track_fbs(state);
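Taken together, intel_display.c gates the new DMC flip queue conservatively: it is only chosen for a plain synchronous flip where nothing else about the CRTC changes. Restating the eligibility test from intel_atomic_dsb_prepare() above:

	/* flipq only for plain, synchronous page flips */
	bool use_flipq =
		intel_flipq_supported(display) &&
		!crtc_state->do_async_flip &&
		!crtc_state->vrr.enable &&
		!crtc_state->has_psr &&
		!intel_crtc_needs_modeset(crtc_state) &&
		!intel_crtc_needs_fastset(crtc_state) &&
		!intel_crtc_needs_color_update(crtc_state);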
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 3b54a62c290a..37e2ab301a80 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -30,38 +30,21 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
-enum drm_scaling_filter;
-struct dpll;
struct drm_atomic_state;
-struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
-struct drm_file;
-struct drm_format_info;
-struct drm_framebuffer;
-struct drm_i915_private;
-struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
-struct drm_plane;
-struct drm_plane_state;
-struct i915_address_space;
-struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_display;
-struct intel_dp;
struct intel_encoder;
-struct intel_initial_plane_config;
struct intel_link_m_n;
struct intel_plane;
struct intel_plane_state;
struct intel_power_domain_mask;
-struct pci_dev;
-struct work_struct;
-
#define pipe_name(p) ((p) + 'A')
@@ -452,10 +435,10 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+int vlv_get_hpll_vco(struct drm_device *drm);
+int vlv_get_cck_clock(struct drm_device *drm,
const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+int vlv_get_cck_clock_hpll(struct drm_device *drm,
const char *name, u32 reg);
bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -524,6 +507,9 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_display_min_pipe_bpp(void);
+int intel_display_max_pipe_bpp(struct intel_display *display);
+
/* modesetting */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index 0578b68404da..4d565935e2cc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -2,10 +2,11 @@
/* Copyright © 2024 Intel Corporation */
#include "i915_drv.h"
+#include "intel_display_conversion.h"
struct intel_display *__i915_to_display(struct drm_i915_private *i915)
{
- return &i915->display;
+ return i915->display;
}
struct intel_display *__drm_to_display(struct drm_device *drm)
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index b4937e102360..8c226406c5cd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -42,7 +42,7 @@ struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dmc;
-struct intel_dpll_funcs;
+struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
struct intel_fdi_funcs;
@@ -122,11 +122,11 @@ struct intel_audio {
* intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
* dpll, because on some platforms plls share registers.
*/
-struct intel_dpll {
+struct intel_dpll_global {
struct mutex lock;
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ int num_dpll;
+ struct intel_dpll dplls[I915_NUM_PLLS];
const struct intel_dpll_mgr *mgr;
struct {
@@ -300,7 +300,7 @@ struct intel_display {
const struct intel_cdclk_funcs *cdclk;
/* Display pll funcs */
- const struct intel_dpll_funcs *dpll;
+ const struct intel_dpll_global_funcs *dpll;
/* irq display functions */
const struct intel_hotplug_funcs *hotplug;
@@ -480,6 +480,12 @@ struct intel_display {
} irq;
struct {
+ /* protected by wm.wm_mutex */
+ u16 linetime[I915_MAX_PIPES];
+ bool disable[I915_MAX_PIPES];
+ } pkgc;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -539,6 +545,11 @@ struct intel_display {
} sagv;
struct {
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex lock;
+ } sbi;
+
+ struct {
/*
* DG2: Mask of PHYs that were not calibrated by the firmware
* and should not be used.
@@ -565,12 +576,15 @@ struct intel_display {
/* hipri wq for commit cleanups */
struct workqueue_struct *cleanup;
+
+ /* unordered workqueue for all display unordered work */
+ struct workqueue_struct *unordered;
} wq;
/* Grouping using named structs. Keep sorted. */
struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
- struct intel_dpll dpll;
+ struct intel_dpll_global dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 8d0a1779dd19..ce3f9810c42d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -13,7 +14,6 @@
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_irq.h"
#include "i915_reg.h"
#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
@@ -25,6 +25,7 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -39,6 +40,7 @@
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
@@ -618,7 +620,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
drm_modeset_lock_all(display->drm);
@@ -627,7 +629,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
display->dpll.ref_clks.nssc,
display->dpll.ref_clks.ssc);
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
@@ -972,7 +974,7 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
return ret;
drm_dbg(display->drm, "Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
+ str_true_false(dsc_enable));
intel_dp->force_dsc_en = dsc_enable;
*offp += len;
@@ -1183,7 +1185,7 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
return ret;
drm_dbg(display->drm, "Got %s for DSC Fractional BPP Enable\n",
- (dsc_fractional_bpp_enable) ? "true" : "false");
+ str_true_false(dsc_fractional_bpp_enable));
intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
*offp += len;
@@ -1325,6 +1327,7 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
intel_psr_connector_debugfs_add(connector);
intel_alpm_lobf_debugfs_add(connector);
intel_dp_link_training_debugfs_add(connector);
+ intel_link_bw_connector_debugfs_add(connector);
if (DISPLAY_VER(display) >= 11 &&
((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst.dp) ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 90d714598664..089cffabbad5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -18,6 +18,7 @@
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -1479,6 +1480,7 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
+ { 30, 2, &xe2_lpd_display },
};
static const struct intel_display_device_info *
@@ -1621,13 +1623,17 @@ static void display_platforms_or(struct intel_display_platforms *dst,
struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
{
- struct intel_display *display = to_intel_display(pdev);
+ struct intel_display *display;
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
const struct subplatform_desc *subdesc;
enum intel_step step;
+ display = kzalloc(sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return ERR_PTR(-ENOMEM);
+
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
@@ -1708,7 +1714,11 @@ no_display:
void intel_display_device_remove(struct intel_display *display)
{
+ if (!display)
+ return;
+
intel_display_params_free(&display->params);
+ kfree(display);
}
static void __intel_display_device_info_runtime_init(struct intel_display *display)
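intel_display_device_probe() now allocates the display struct itself and can therefore fail with ERR_PTR(-ENOMEM); intel_display_device_remove() correspondingly frees it and tolerates NULL. A sketch of the expected caller pattern (the surrounding probe function is assumed, not shown in this hunk):

	struct intel_display *display;

	display = intel_display_device_probe(pdev);
	if (IS_ERR(display))
		return PTR_ERR(display);

	/* ... and on the teardown path: */
	intel_display_device_remove(display);	/* NULL-safe */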
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 87c666792c0d..4308822f0415 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -157,6 +157,7 @@ struct intel_display_platforms {
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
@@ -172,6 +173,7 @@ struct intel_display_platforms {
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_FDI(__display) (IS_DISPLAY_VER((__display), 5, 8) && !HAS_GMCH(__display))
#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
@@ -181,6 +183,7 @@ struct intel_display_platforms {
#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PIPEDMC(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
@@ -189,9 +192,8 @@ struct intel_display_platforms {
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
- ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
- HAS_DSC(__display))
+#define HAS_ULTRAJOINER(__display) (((__display)->platform.dgfx && \
+ DISPLAY_VER(__display) == 14) && HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 411fe7b918a7..8586ba102605 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -27,6 +27,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
@@ -43,6 +44,7 @@
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
+#include "intel_flipq.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -83,16 +85,10 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct intel_cdclk_state *cdclk_state;
-
if (!HAS_DISPLAY(display))
return;
- cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
-
- intel_update_cdclk(display);
- intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
+ intel_cdclk_read_hw(display);
intel_display_wa_apply(display);
}
@@ -241,12 +237,16 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (!HAS_DISPLAY(display))
return 0;
- intel_dmc_init(display);
+ display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
+ if (!display->hotplug.dp_wq) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
if (!display->wq.modeset) {
ret = -ENOMEM;
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_dp;
}
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -262,27 +262,35 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
goto cleanup_wq_flip;
}
+ display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
+ if (!display->wq.unordered) {
+ ret = -ENOMEM;
+ goto cleanup_wq_cleanup;
+ }
+
+ intel_dmc_init(display);
+
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_color_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_bw_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
ret = intel_pmdemand_init(display);
if (ret)
- goto cleanup_wq_cleanup;
+ goto cleanup_wq_unordered;
intel_init_quirks(display);
@@ -290,12 +298,16 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_unordered:
+ destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
destroy_workqueue(display->wq.cleanup);
cleanup_wq_flip:
destroy_workqueue(display->wq.flip);
cleanup_wq_modeset:
destroy_workqueue(display->wq.modeset);
+cleanup_wq_dp:
+ destroy_workqueue(display->hotplug.dp_wq);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
@@ -466,7 +478,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
}
intel_plane_possible_crtcs_init(display);
- intel_shared_dpll_init(display);
+ intel_dpll_init(display);
intel_fdi_pll_freq_update(display);
intel_update_czclk(display);
@@ -526,6 +538,8 @@ int intel_display_driver_probe(struct intel_display *display)
*/
intel_hdcp_component_init(display);
+ intel_flipq_init(display);
+
/*
* Force all active planes to recompute their states. So that on
* mode_setcrtc after probe, all the intel_plane_state variables
@@ -591,6 +605,7 @@ void intel_display_driver_remove(struct intel_display *display)
flush_workqueue(display->wq.flip);
flush_workqueue(display->wq.modeset);
flush_workqueue(display->wq.cleanup);
+ flush_workqueue(display->wq.unordered);
/*
* MST topology needs to be suspended so we don't have any calls to
@@ -603,8 +618,6 @@ void intel_display_driver_remove(struct intel_display *display)
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
@@ -619,7 +632,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
- flush_workqueue(i915->unordered_wq);
+ flush_workqueue(display->wq.unordered);
intel_hdcp_component_fini(display);
@@ -631,9 +644,11 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
intel_gmbus_teardown(display);
+ destroy_workqueue(display->hotplug.dp_wq);
destroy_workqueue(display->wq.flip);
destroy_workqueue(display->wq.modeset);
destroy_workqueue(display->wq.cleanup);
+ destroy_workqueue(display->wq.unordered);
intel_fbc_cleanup(display);
}
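The probe/remove changes above follow the usual workqueue discipline: allocate in order, unwind in exact reverse order on the error path, and flush before destroying on remove. Generic shape of the pattern (names illustrative, not from this patch):

	a = alloc_ordered_workqueue("a", 0);
	if (!a)
		return -ENOMEM;
	b = alloc_workqueue("b", 0, 0);
	if (!b) {
		destroy_workqueue(a);	/* unwind in reverse */
		return -ENOMEM;
	}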
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 3e73832e5e81..fb25ec8adae3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -9,14 +9,15 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
@@ -25,6 +26,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
+#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@@ -1016,7 +1018,15 @@ static u32 gen8_de_port_aux_mask(struct intel_display *display)
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 14)
+ if (DISPLAY_VER(display) >= 20)
+ return MTL_PLANE_ATS_FAULT |
+ GEN9_PIPE_CURSOR_FAULT |
+ GEN11_PIPE_PLANE5_FAULT |
+ GEN9_PIPE_PLANE4_FAULT |
+ GEN9_PIPE_PLANE3_FAULT |
+ GEN9_PIPE_PLANE2_FAULT |
+ GEN9_PIPE_PLANE1_FAULT;
+ else if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -1418,7 +1428,8 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err_ratelimited(display->drm,
- "The master control interrupt lied (DE PIPE)!\n");
+ "The master control interrupt lied (DE PIPE %c)!\n",
+ pipe_name(pipe));
continue;
}
@@ -1441,6 +1452,9 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
+ if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
+ intel_pipedmc_irq_handler(display, pipe);
+
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(display, pipe);
@@ -2258,6 +2272,10 @@ void gen8_de_irq_postinstall(struct intel_display *display)
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
+ /* TODO figure out PIPEDMC interrupts for pre-LNL */

+ if (DISPLAY_VER(display) >= 20)
+ de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT;
+
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index c4f1ab43fc0c..75316247ee8a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -62,6 +62,9 @@ intel_display_param_named_unsafe(enable_dpt, bool, 0400,
intel_display_param_named_unsafe(enable_dsb, bool, 0400,
"Enable display state buffer (DSB) (default: true)");
+intel_display_param_named_unsafe(enable_flipq, bool, 0400,
+ "Enable DMC flip queue (default: false)");
+
intel_display_param_named_unsafe(enable_sagv, bool, 0400,
"Enable system agent voltage/frequency scaling (SAGV) (default: true)");
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 5317138e6044..784e6bae8615 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -31,6 +31,7 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(bool, enable_dpt, true, 0400) \
param(bool, enable_dsb, true, 0600) \
+ param(bool, enable_flipq, false, 0600) \
param(bool, enable_sagv, true, 0600) \
param(int, disable_power_well, -1, 0400) \
param(bool, enable_ips, true, 0600) \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 16356523816f..273054c22325 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -5,6 +5,8 @@
#include <linux/string_helpers.h>
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -16,6 +18,7 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -1254,10 +1257,8 @@ static u32 hsw_read_dcomp(struct intel_display *display)
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.haswell) {
- if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
+ if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
intel_de_write(display, D_COMP_BDW, val);
@@ -1604,9 +1605,7 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
static void tgl_bw_buddy_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- enum intel_dram_type type = dev_priv->dram_info.type;
- u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct buddy_page_mask *table;
unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
@@ -1623,8 +1622,8 @@ static void tgl_bw_buddy_init(struct intel_display *display)
table = tgl_buddy_page_masks;
for (config = 0; table[config].page_mask != 0; config++)
- if (table[config].num_channels == num_channels &&
- table[config].type == type)
+ if (table[config].num_channels == dram_info->num_channels &&
+ table[config].type == dram_info->type)
break;
if (table[config].page_mask == 0) {
@@ -1883,12 +1882,11 @@ static void vlv_cmnlane_wa(struct intel_display *display)
static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
- vlv_punit_get(dev_priv);
- ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- vlv_punit_put(dev_priv);
+ vlv_punit_get(display->drm);
+ ret = (vlv_punit_read(display->drm, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(display->drm);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index ab1163744bc5..77268802b55e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "vlv_sideband_reg.h"
+#include "vlv_iosf_sb_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index b104bce0e14d..48cac225a809 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
@@ -30,8 +31,20 @@
#include "intel_vga.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
+#include "vlv_iosf_sb_reg.h"
#include "vlv_sideband.h"
-#include "vlv_sideband_reg.h"
+
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ *
+ * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
+ */
+static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
+{
+ int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
+
+ return pw_idx - pw1_idx + SKL_PG1;
+}
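Per the comment above it, pw_idx_to_pg() maps PW1..PW4 control indices onto PG1..PG4, collapsing the old ICL_PW_CTL_IDX_TO_PG()/SKL_PW_CTL_IDX_TO_PG() macro pair into one subtraction: drop the platform's PW1 base index, then offset from SKL_PG1. Worked example, assuming ICL-style numbering where ICL_PW_CTL_IDX_PW_2 == ICL_PW_CTL_IDX_PW_1 + 1: pw_idx_to_pg(display, ICL_PW_CTL_IDX_PW_2) yields SKL_PG1 + 1, i.e. PG2.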
struct i915_power_well_regs {
i915_reg_t bios;
@@ -307,8 +320,8 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- bool disabled;
u32 reqs;
+ int ret;
/*
* Bspec doesn't require waiting for PWs to get disabled, but still do
@@ -319,12 +332,18 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(display, regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
- if (disabled)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
+ ret = intel_de_wait_for_clear(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ reqs ? 0 : 1);
+ if (!ret)
return;
+ /* Refresh requesters in case they popped up during the wait. */
+ if (!reqs)
+ reqs = hsw_power_well_requesters(display, regs, pw_idx);
+
drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
@@ -349,8 +368,7 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
/* Wa_16013190616:adlp */
if (display->platform.alderlake_p && pg == SKL_PG1)
@@ -374,8 +392,8 @@ static void hsw_power_well_enable(struct intel_display *display,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ pg = pw_idx_to_pg(display, pw_idx);
+
gen9_wait_for_power_well_fuses(display, pg);
}
@@ -485,8 +503,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
int ret, tries = 0;
while (1) {
- ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
- 250, 1);
+ ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
@@ -809,7 +826,6 @@ static void tgl_disable_dc3co(struct intel_display *display)
static void assert_can_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
@@ -829,7 +845,7 @@ static void assert_can_enable_dc5(struct intel_display *display)
assert_display_rpm_held(display);
- assert_dmc_loaded(display);
+ assert_main_dmc_loaded(display);
}
void gen9_enable_dc5(struct intel_display *display)
@@ -860,7 +876,7 @@ static void assert_can_enable_dc6(struct intel_display *display)
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_dmc_loaded(display);
+ assert_main_dmc_loaded(display);
}
void skl_enable_dc6(struct intel_display *display)
@@ -1102,7 +1118,6 @@ static void i830_pipes_power_well_sync_hw(struct intel_display *display,
static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
@@ -1112,29 +1127,29 @@ static void vlv_set_power_well(struct intel_display *display,
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
PUNIT_PWRGT_PWR_GATE(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+ ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+ vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void vlv_power_well_enable(struct intel_display *display,
@@ -1152,7 +1167,6 @@ static void vlv_power_well_disable(struct intel_display *display,
static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1162,9 +1176,9 @@ static bool vlv_power_well_enabled(struct intel_display *display,
mask = PUNIT_PWRGT_MASK(pw_idx);
ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1178,10 +1192,10 @@ static bool vlv_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
drm_WARN_ON(display->drm, ctrl != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1437,7 +1451,6 @@ static void assert_chv_phy_status(struct intel_display *display)
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
@@ -1461,30 +1474,30 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(display, DISPLAY_PHY_CONTROL,
@@ -1535,7 +1548,6 @@ static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1553,9 +1565,9 @@ static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_ph
else
reg = CHV_CMN_DW6_CH1;
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, phy, reg);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ val = vlv_dpio_read(display->drm, phy, reg);
+ vlv_dpio_put(display->drm);
/*
* This assumes !override is only used when the port is disabled.
@@ -1665,14 +1677,13 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1685,10 +1696,10 @@ static bool chv_pipe_power_well_enabled(struct intel_display *display,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
drm_WARN_ON(display->drm, ctrl << 16 != state);
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
return enabled;
}
@@ -1697,36 +1708,35 @@ static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- vlv_punit_get(dev_priv);
+ vlv_punit_get(display->drm);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+ ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+ vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));
#undef COND
out:
- vlv_punit_put(dev_priv);
+ vlv_punit_put(display->drm);
}
static void chv_pipe_power_well_sync_hw(struct intel_display *display,
@@ -1772,7 +1782,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
new file mode 100644
index 000000000000..7bd09d981cd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -0,0 +1,2932 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_REGS_H__
+#define __INTEL_DISPLAY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
+
+#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
+
+#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
+#define MIPIO_RST_CTRL (1 << 2)
+
+#define _BXT_PHY_CTL_DDI_A 0x64C00
+#define _BXT_PHY_CTL_DDI_B 0x64C10
+#define _BXT_PHY_CTL_DDI_C 0x64C20
+#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
+#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
+#define BXT_PHY_LANE_ENABLED (1 << 8)
+#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+ _BXT_PHY_CTL_DDI_B)
+
+#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
+#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
+#define COMMON_RESET_DIS (1 << 31)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
+
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1 _MMIO(0x4F074)
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
+
+#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
+#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
+#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
+#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
+#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
+#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
+#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
+#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
+#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
+
+#define DERRMR _MMIO(0x44050)
+/* Note that HBLANK events are reserved on bdw+ */
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
+
+#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
+ VLV_IER, \
+ VLV_IIR)
+
+#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
+#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
+#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
+#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
+#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
+#define VLV_ERROR_PAGE_TABLE (1 << 4)
+#define VLV_ERROR_CLAIM (1 << 0)
+
+#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
+
+#define _MBUS_ABOX0_CTL 0x45038
+#define _MBUS_ABOX1_CTL 0x45048
+#define _MBUS_ABOX2_CTL 0x4504C
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
+#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
+#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
+#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
+#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
+#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
+#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
+
+#define IPS_CTL _MMIO(0x43408)
+#define IPS_ENABLE REG_BIT(31)
+#define IPS_FALSE_COLOR REG_BIT(4)
+
+/*
+ * Clock control & power management
+ */
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+
+#define VGA0 _MMIO(0x6000)
+#define VGA1 _MMIO(0x6004)
+#define VGA_PD _MMIO(0x6010)
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
+#define DPLL_PORTC_READY_MASK (0xf << 4)
+#define DPLL_PORTB_READY_MASK (0xf)
+
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
+#define PHY_LDO_DELAY_0NS 0x0
+#define PHY_LDO_DELAY_200NS 0x1
+#define PHY_LDO_DELAY_600NS 0x2
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
+#define PHY_CH_SU_PSR 0x1
+#define PHY_CH_DEEP_PSR 0x7
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
+#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
+
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
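+/*
+ * e.g. (editor's illustration, assuming p1 holds the divider value) a P1
+ * post divider of 4 is programmed as bit 3 of the field:
+ *
+ *	dpll |= (1 << (p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ */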
+/* i830, required in DVO non-gang */
+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides
+ * of the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.3 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This best be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
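+/*
+ * Editor's sketch (hypothetical helper, not the driver's code): pick the
+ * smallest multiplier that brings the 10x bus clock into the 1-2 GHz window
+ * described above; the field is then programmed with (mult - 1).
+ *
+ *	static int sdvo_pixel_multiplier(int dotclock_khz)
+ *	{
+ *		int mult;
+ *
+ *		for (mult = 1; mult <= 5; mult++)
+ *			if (dotclock_khz * mult * 10 >= 1000000 &&
+ *			    dotclock_khz * mult * 10 <= 2000000)
+ *				return mult;
+ *		return 0;
+ *	}
+ *
+ * e.g. 65000 kHz: mult = 2 gives a 1.3 GHz bus, matching the example above.
+ */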
+
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
+#define _FPA0 0x6040
+#define _FPA1 0x6044
+#define _FPB0 0x6048
+#define _FPB1 0x604c
+#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
+#define FP_M2_DIV_SHIFT 0
+
+#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
+#define FW_CSPWRDWNEN (1 << 15)
+
+#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
+
+#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
+#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
+
+#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
+
+/*
+ * Overlay regs
+ */
+#define OVADD _MMIO(0x30000)
+#define DOVSTA _MMIO(0x30008)
+#define OC_BUF (0x3 << 20)
+#define OGAMC5 _MMIO(0x30010)
+#define OGAMC4 _MMIO(0x30014)
+#define OGAMC3 _MMIO(0x30018)
+#define OGAMC2 _MMIO(0x3001c)
+#define OGAMC1 _MMIO(0x30020)
+#define OGAMC0 _MMIO(0x30024)
+
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
+
+#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
+#define DPCE_GATING_DIS REG_BIT(17)
+
+#define _CLKGATE_DIS_PSL_A 0x46520
+#define _CLKGATE_DIS_PSL_B 0x46524
+#define _CLKGATE_DIS_PSL_C 0x46528
+#define DUPS1_GATING_DIS (1 << 15)
+#define DUPS2_GATING_DIS (1 << 19)
+#define DUPS3_GATING_DIS (1 << 23)
+#define CURSOR_GATING_DIS REG_BIT(28)
+#define DPF_GATING_DIS (1 << 10)
+#define DPF_RAM_GATING_DIS (1 << 9)
+#define DPFR_GATING_DIS (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
+/*
+ * Display engine regs
+ */
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define _TRANS_HTOTAL_B 0x61000
+#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+
+#define _TRANS_HBLANK_A 0x60004
+#define _TRANS_HBLANK_B 0x61004
+#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+
+#define _TRANS_HSYNC_A 0x60008
+#define _TRANS_HSYNC_B 0x61008
+#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+
+#define _TRANS_VTOTAL_A 0x6000c
+#define _TRANS_VTOTAL_B 0x6100c
+#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+
+#define _TRANS_VBLANK_A 0x60010
+#define _TRANS_VBLANK_B 0x61010
+#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+
+#define _TRANS_VSYNC_A 0x60014
+#define _TRANS_VSYNC_B 0x61014
+#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+
+#define _PIPEASRC 0x6001c
+#define _PIPEBSRC 0x6101c
+#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
+#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
+#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
+
+#define _BCLRPAT_A 0x60020
+#define _BCLRPAT_B 0x61020
+#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+
+#define _TRANS_MULT_A 0x6002c
+#define _TRANS_MULT_B 0x6102c
+#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+
+#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
+/* HDMI/DP bits are g4x+ */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PULSE (1 << 17)
+/* CRT/TV common between gen3+ */
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+/*
+ * SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI.
+ */
+#define _GEN3_SDVOB 0x61140
+#define _GEN3_SDVOC 0x61160
+#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
+#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
+#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
+#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
+#define PCH_SDVOB _MMIO(0xe1140)
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC _MMIO(0xe1150)
+#define PCH_HDMID _MMIO(0xe1160)
+
+#define PORT_DFT_I9XX _MMIO(0x61150)
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
+#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
+#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_SEL_SHIFT 30
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/*
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA _MMIO(0x61178)
+/*
+ * Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each
+ * byte of the infoframe structure specified by CEA-861.
+ */
+#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
+#define VIDEO_DIP_GMP_DATA_SIZE 36
+#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
+#define VIDEO_DIP_CTL _MMIO(0x61170)
+/* Pre HSW: */
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT(port) ((port) << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
+
+#define PCH_GTC_CTL _MMIO(0xe7000)
+#define PCH_GTC_ENABLE (1 << 31)
+
+/* Display Port */
+#define DP_A _MMIO(0x64000) /* eDP */
+#define DP_B _MMIO(0x64100)
+#define DP_C _MMIO(0x64200)
+#define DP_D _MMIO(0x64300)
+#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
+#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
+#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link clock (the 1.62 or 2.7 Gb/s link rate)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define _PIPEA_DATA_M_G4X 0x70050
+#define _PIPEB_DATA_M_G4X 0x71050
+#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define TU_SIZE_MASK REG_GENMASK(30, 25)
+#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
+#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
+#define DATA_LINK_N_MAX (0x800000)
+
+#define _PIPEA_DATA_N_G4X 0x70054
+#define _PIPEB_DATA_N_G4X 0x71054
+#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+#define _PIPEA_LINK_M_G4X 0x70060
+#define _PIPEB_LINK_M_G4X 0x71060
+#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+
+#define _PIPEA_LINK_N_G4X 0x70064
+#define _PIPEB_LINK_N_G4X 0x71064
+#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
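+/*
+ * Editor's sketch (not the driver's actual helper, which applies further
+ * rounding): both M/N pairs boil down to "scale N up for precision, then
+ * derive M from the ratio". For data M/N, m is dot clock * bytes per pixel
+ * and n is ls_clk * lane count; for link M/N, m is the dot clock and n is
+ * ls_clk.
+ *
+ *	static void compute_m_n_sketch(u32 m, u32 n, u32 *ret_m, u32 *ret_n)
+ *	{
+ *		*ret_n = min_t(u32, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+ *		*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
+ *	}
+ */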
+
+/* Pipe A */
+#define _PIPEADSL 0x70000
+#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
+#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANSACONF 0x70008
+#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 0)
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 1)
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
+/*
+ * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
+ * DBL=power saving pixel doubling, PF-ID* requires panel fitter
+ */
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
+#define _PIPEASTAT 0x70024
+#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
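+/* status bits sit in the low word; the matching enable bit is 16 positions above */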
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
+
+#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
+#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
+
+#define _PIPE_MISC_A 0x70030
+#define _PIPE_MISC_B 0x71030
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
+#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
+#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
+#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
+#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
+#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
+#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
+#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+/*
+ * For Display < 13, bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of 6, 8 and 10 BPC.
+ * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of
+ * 6, 8, 10 and 12 BPC.
+ */
+#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
+#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
+#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
+#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
+#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
+#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
+#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
+#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
+#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
+#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
+
+#define _PIPE_MISC2_A 0x7002C
+#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
+#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
+
+#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
+#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
+#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
+#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
+#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
+#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
+#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
+#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
+#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
+#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
+#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
+#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
+#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
+#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
+#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
+#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
+#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
+#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
+#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
+#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
+#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
+#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
+#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
+#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
+#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
+#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
+#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
+
+#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
+
+#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ *	high1 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *		PIPE_FRAME_HIGH_SHIFT;
+ *	low1 = (INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *		PIPE_FRAME_LOW_SHIFT;
+ *	high2 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *		PIPE_FRAME_HIGH_SHIFT;
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH 0x70040
+#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+
+#define _PIPEAFRAMEPIXEL 0x70044
+#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_G4X 0x70040
+#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+
+#define _PIPEA_FLIPCOUNT_G4X 0x70044
+#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
+
+/* CHV pipe B blender */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_BLEND_MASK REG_GENMASK(31, 30)
+#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
+#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
+#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
+
+#define _CHV_CANVAS_A 0x60a04
+#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
+#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
+#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
+
+/*
+ * VBIOS flags
+ * gen2:
+ * [00:06] alm,mgm
+ * [10:16] all
+ * [30:32] alm,mgm
+ * gen3+:
+ * [00:0f] all
+ * [10:1f] all
+ * [30:32] all
+ */
+#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
+#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
+#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
+#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
+#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL _MMIO(0x45300)
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define _PIPEA_DATA_M1 0x60030
+#define _PIPEB_DATA_M1 0x61030
+#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+
+#define _PIPEA_DATA_N1 0x60034
+#define _PIPEB_DATA_N1 0x61034
+#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+
+#define _PIPEA_DATA_M2 0x60038
+#define _PIPEB_DATA_M2 0x61038
+#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+
+#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEB_DATA_N2 0x6103c
+#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+
+#define _PIPEA_LINK_M1 0x60040
+#define _PIPEB_LINK_M1 0x61040
+#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+
+#define _PIPEA_LINK_N1 0x60044
+#define _PIPEB_LINK_N1 0x61044
+#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+
+#define _PIPEA_LINK_M2 0x60048
+#define _PIPEB_LINK_M2 0x61048
+#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+
+#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEB_LINK_N2 0x6104c
+#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
+
+/*
+ * Skylake scalers
+ */
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
+#define _PS_1A_CTRL 0x68180
+#define _PS_2A_CTRL 0x68280
+#define _PS_1B_CTRL 0x68980
+#define _PS_2B_CTRL 0x68A80
+#define _PS_1C_CTRL 0x69180
+#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
+ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
+#define PS_SCALER_EN REG_BIT(31)
+#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
+#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
+#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
+#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
+#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
+#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
+#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
+#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
+#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
+#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
+#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
+#define PS_BINDING_MASK REG_GENMASK(27, 25)
+#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
+#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
+#define PS_FILTER_MASK REG_GENMASK(24, 23)
+#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
+#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
+#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
+#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
+#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
+#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
+#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
+#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
+#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
+#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
+#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
+#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
+#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
+#define PS_PWRUP_PROGRESS REG_BIT(17)
+#define PS_V_FILTER_BYPASS REG_BIT(8)
+#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
+#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
+#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
+#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
+#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
+#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
+#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
+#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
+#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
+#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
+#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
+#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
+#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
+#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
+#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
+
+#define _PS_PWR_GATE_1A 0x68160
+#define _PS_PWR_GATE_2A 0x68260
+#define _PS_PWR_GATE_1B 0x68960
+#define _PS_PWR_GATE_2B 0x68A60
+#define _PS_PWR_GATE_1C 0x69160
+#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
+#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
+#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
+#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
+#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
+#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
+#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
+#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
+#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
+#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
+#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
+
+#define _PS_WIN_POS_1A 0x68170
+#define _PS_WIN_POS_2A 0x68270
+#define _PS_WIN_POS_1B 0x68970
+#define _PS_WIN_POS_2B 0x68A70
+#define _PS_WIN_POS_1C 0x69170
+#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
+#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
+
+#define _PS_WIN_SZ_1A 0x68174
+#define _PS_WIN_SZ_2A 0x68274
+#define _PS_WIN_SZ_1B 0x68974
+#define _PS_WIN_SZ_2B 0x68A74
+#define _PS_WIN_SZ_1C 0x69174
+#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
+ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
+#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
+
+#define _PS_VSCALE_1A 0x68184
+#define _PS_VSCALE_2A 0x68284
+#define _PS_VSCALE_1B 0x68984
+#define _PS_VSCALE_2B 0x68A84
+#define _PS_VSCALE_1C 0x69184
+#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
+ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+
+#define _PS_HSCALE_1A 0x68190
+#define _PS_HSCALE_2A 0x68290
+#define _PS_HSCALE_1B 0x68990
+#define _PS_HSCALE_2B 0x68A90
+#define _PS_HSCALE_1C 0x69190
+#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
+ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+
+#define _PS_VPHASE_1A 0x68188
+#define _PS_VPHASE_2A 0x68288
+#define _PS_VPHASE_1B 0x68988
+#define _PS_VPHASE_2B 0x68A88
+#define _PS_VPHASE_1C 0x69188
+#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
+ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
+#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
+#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
+#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
+
+#define _PS_HPHASE_1A 0x68194
+#define _PS_HPHASE_2A 0x68294
+#define _PS_HPHASE_1B 0x68994
+#define _PS_HPHASE_2B 0x68A94
+#define _PS_HPHASE_1C 0x69194
+#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
+ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+
+#define _PS_ECC_STAT_1A 0x681D0
+#define _PS_ECC_STAT_2A 0x682D0
+#define _PS_ECC_STAT_1B 0x689D0
+#define _PS_ECC_STAT_2B 0x68AD0
+#define _PS_ECC_STAT_1C 0x691D0
+#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
+ _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
+
+#define _PS_COEF_SET0_INDEX_1A 0x68198
+#define _PS_COEF_SET0_INDEX_2A 0x68298
+#define _PS_COEF_SET0_INDEX_1B 0x68998
+#define _PS_COEF_SET0_INDEX_2B 0x68A98
+#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
+#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
+
+#define _PS_COEF_SET0_DATA_1A 0x6819C
+#define _PS_COEF_SET0_DATA_2A 0x6829C
+#define _PS_COEF_SET0_DATA_1B 0x6899C
+#define _PS_COEF_SET0_DATA_2B 0x68A9C
+#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
+
+/* More Ivybridge lolz */
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
+
+#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
+
+#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
+#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
+#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
+#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl-mtl */
+#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl */
+#define GEN12_PIPEDMC_FLIPQ_DONE REG_BIT(24) /* tgl-adl */
+#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
+#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
+#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
+#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
+#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
+#define MTL_PIPEDMC_FLIPQ_DONE REG_BIT(17) /* mtl */
+#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
+#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
+#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
+#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
+#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
+#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
+#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
+#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
+#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
+#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
+#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
+#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
+#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
+#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
+#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
+#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
+#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
+#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
+#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
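+/* plane 1-4 flip done -> bits 3-6, plane 5-7 -> bits 16-18 */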
+#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
+ REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
+#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
+#define GEN8_PIPE_VSYNC REG_BIT(1)
+#define GEN8_PIPE_VBLANK REG_BIT(0)
+
+#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
+ GEN8_DE_PIPE_IER(pipe), \
+ GEN8_DE_PIPE_IIR(pipe))
+
+#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
+#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
+
+#define GEN8_DE_PORT_ISR _MMIO(0x44440)
+#define GEN8_DE_PORT_IMR _MMIO(0x44444)
+#define GEN8_DE_PORT_IIR _MMIO(0x44448)
+#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
+#define ICL_AUX_CHANNEL_E (1 << 29)
+#define ICL_AUX_CHANNEL_F (1 << 28)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
+#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
+#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
+#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
+#define BXT_DE_PORT_GMBUS (1 << 1)
+#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
+#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
+#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
+#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
+#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
+#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
+#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
+#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
+#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
+#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
+#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
+
+#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
+ GEN8_DE_PORT_IER, \
+ GEN8_DE_PORT_IIR)
+
+#define GEN8_DE_MISC_ISR _MMIO(0x44460)
+#define GEN8_DE_MISC_IMR _MMIO(0x44464)
+#define GEN8_DE_MISC_IIR _MMIO(0x44468)
+#define GEN8_DE_MISC_IER _MMIO(0x4446c)
+#define XELPDP_RM_TIMEOUT REG_BIT(29)
+#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
+#define GEN8_DE_MISC_GSE REG_BIT(27)
+#define GEN8_DE_EDP_PSR REG_BIT(19)
+#define XELPDP_PMDEMAND_RSP REG_BIT(3)
+#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
+
+#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
+ GEN8_DE_MISC_IER, \
+ GEN8_DE_MISC_IIR)
+
+#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
+#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
+#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
+#define GEN11_DE_PCH_IRQ (1 << 23)
+#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
+#define GEN11_DE_PORT_IRQ (1 << 20)
+#define GEN11_DE_PIPE_C (1 << 18)
+#define GEN11_DE_PIPE_B (1 << 17)
+#define GEN11_DE_PIPE_A (1 << 16)
+
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TC_HOTPLUG(HPD_PORT_TC1))
+#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
+ GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
+
+#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
+ GEN11_DE_HPD_IER, \
+ GEN11_DE_HPD_IIR)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
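+/* each TC pin gets a 4-bit field: 8 = enable, 2 = long detect, 1 = short detect */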
+#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
+
+#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
+#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
+#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
+#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
+#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
+#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
+#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
+#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
+#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
+
+#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
+ PICAINTERRUPT_IER, \
+ PICAINTERRUPT_IIR)
+
+#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
+#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
+#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
+#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
+#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
+#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
+#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
+
+#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
+#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
+#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
+#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
+#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
+#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
+#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
+#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
+
+#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
+#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
+#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
+#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
+#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
+
+#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
+#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
+
+#define FUSE_STRAP _MMIO(0x42014)
+#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
+#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
+#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
+#define IVB_PIPE_C_DISABLE REG_BIT(28)
+#define ILK_HDCP_DISABLE REG_BIT(25)
+#define ILK_eDP_A_DISABLE REG_BIT(24)
+#define HSW_CDCLK_LIMIT REG_BIT(24)
+#define ILK_DESKTOP REG_BIT(23)
+#define HSW_CPU_SSC_ENABLE REG_BIT(21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT REG_BIT(1)
+
+#define CHICKEN_MISC_2 _MMIO(0x42084)
+#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
+#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
+#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
+#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
+#define GLK_CL2_PWR_DOWN REG_BIT(12)
+#define GLK_CL1_PWR_DOWN REG_BIT(11)
+#define GLK_CL0_PWR_DOWN REG_BIT(10)
+
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
+#define CHICKEN_MISC_4 _MMIO(0x4208c)
+#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
+#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
+#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
+
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
+#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
+#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
+#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
+#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
+#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
+#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
+#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
+
+#define DISP_ARB_CTL2 _MMIO(0x45004)
+#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
+#define DISP_IPC_ENABLE REG_BIT(3)
+
+#define GEN7_MSG_CTL _MMIO(0x45010)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define _BW_BUDDY0_CTL 0x45130
+#define _BW_BUDDY1_CTL 0x45140
+#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_CTL, \
+ _BW_BUDDY1_CTL))
+#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
+#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
+
+#define _BW_BUDDY0_PAGE_MASK 0x45134
+#define _BW_BUDDY1_PAGE_MASK 0x45144
+#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_PAGE_MASK, \
+ _BW_BUDDY1_PAGE_MASK))
+
+#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
+
+#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
+#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
+#define DCPR_MASK_LPMODE REG_BIT(26)
+#define DCPR_SEND_RESP_IMM REG_BIT(25)
+#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
+
+#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
+#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
+
+#define SKL_DFSM _MMIO(0x51000)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
+#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
+
+#define XE2LPD_DE_CAP _MMIO(0x41100)
+#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
+#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
+#define XE2LPD_DE_CAP_DSC_REMOVED 1
+#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
+#define XE2LPD_DE_CAP_SCALER_SINGLE 1
+
+#define SKL_DSSM _MMIO(0x51004)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN, \
+ _PIPEB_CHICKEN)
+#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
+#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
+#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
+#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
+
+#define PCH_DISPLAY_BASE 0xc0000u
+
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+
+/* south display engine interrupt: CPT - CNP */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
+#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT | \
+ SDE_PORTA_HOTPLUG_SPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_ERROR_CPT (1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
+
+/* south display engine interrupt: ICP/TGP/MTP */
+#define SDE_PICAINTERRUPT REG_BIT(31)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
+#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
+#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
+#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
+#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
+
+#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
+ SDEIER, \
+ SDEIIR)
+
+#define SERR_INT _MMIO(0xc4040)
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
+#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
+#define BXT_DDIA_HPD_INVERT (1 << 27)
+#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
+#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
+#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
+#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_INVERT (1 << 11)
+#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
+#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
+#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_INVERT (1 << 3)
+#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
+#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
+#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
+ BXT_DDIB_HPD_INVERT | \
+ BXT_DDIC_HPD_INVERT)
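+
+/*
+ * Example (illustrative sketch): each port has a 2-bit status field where
+ * 2 signals a long (connect/disconnect) pulse and 1 a short pulse, per the
+ * definitions above. A hypothetical decode for port D.
+ */
+static inline bool pch_portd_long_pulse(u32 hotplug)
+{
+	return (hotplug & PORTD_HOTPLUG_STATUS_MASK) ==
+		PORTD_HOTPLUG_LONG_DETECT;
+}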
+
+#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
+#define PORTE_HOTPLUG_ENABLE (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
+#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+
+/* This register reuses the PCH_PORT_HOTPLUG register address. The
+ * functionality covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+#define SHPD_FILTER_CNT_250 0x000F8
+
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0 0xc6040
+#define _PCH_FPB0 0xc6048
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define FP_CB_TUNE (0x3 << 22)
+
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB1 0xc604c
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST _MMIO(0xc606c)
+
+#define PCH_DREF_CONTROL _MMIO(0xC6200)
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3 << 12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3 << 10)
+#define RAWCLK_FREQ_MASK 0x3ff
+#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
+#define CNP_RAWCLK_DIV(div) ((div) << 16)
+#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
+#define ICP_RAWCLK_NUM(num) ((num) << 11)
+
+#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
+
+#define PCH_SSC4_PARMS _MMIO(0xc6210)
+#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
+
+#define PCH_DPLL_SEL _MMIO(0xc7000)
+#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
+#define TRANS_DPLLA_SEL(pipe) 0
+#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
+
+/* transcoder */
+#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+
+#define _PCH_TRANS_HBLANK_A 0xe0004
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_HSYNC_A 0xe0008
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+
+#define _PCH_TRANS_VBLANK_A 0xe0010
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNC_A 0xe0014
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+
+#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+
+#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+
+#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+
+#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+
+#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+
+#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+
+#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+
+#define _PCH_TRANSA_LINK_N2 0xe004c
+#define _PCH_TRANSB_LINK_N2 0xe104c
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+/* Per-transcoder DIP controls (PCH) */
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+
+#define _VIDEO_DIP_GCP_A 0xe0210
+#define _VIDEO_DIP_GCP_B 0xe1210
+#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define GCP_COLOR_INDICATION (1 << 2)
+#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
+#define GCP_AV_MUTE (1 << 0)
+
+/* Per-transcoder DIP controls (VLV) */
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+
+/* Haswell DIP controls */
+#define _HSW_VIDEO_DIP_CTL_A 0x60200
+#define _HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+
+#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+
+/* ADLP and later: */
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
+
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
+#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
+#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define _HSW_VIDEO_DIP_AVI_ECC_B 0x61240
+#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+
+#define _HSW_VIDEO_DIP_GCP_A 0x60210
+#define _HSW_VIDEO_DIP_GCP_B 0x61210
+#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+
+#define _HSW_STEREO_3D_CTL_A 0x70020
+#define _HSW_STEREO_3D_CTL_B 0x71020
+#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
+#define S3D_ENABLE (1 << 31)
+
+#define _PCH_TRANSACONF 0xf0008
+#define _PCH_TRANSBCONF 0xf1008
+#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
+#define TRANS_ENABLE REG_BIT(31)
+#define TRANS_STATE_ENABLE REG_BIT(30)
+#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
+#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
+#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
+#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
+#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
+#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
+#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
+#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
+#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
+#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+
+#define PCH_DP_B _MMIO(0xe4100)
+#define PCH_DP_C _MMIO(0xe4200)
+#define PCH_DP_D _MMIO(0xe4300)
+
+/* CPT */
+#define _TRANS_DP_CTL_A 0xe0300
+#define _TRANS_DP_CTL_B 0xe1300
+#define _TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
+#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
+#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
+#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
+#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
+#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
+#define TRANS_DP_ENH_FRAMING REG_BIT(18)
+#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
+#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
+#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
+#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
+#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _TRANS_DP2_CTL_A 0x600a0
+#define _TRANS_DP2_CTL_B 0x610a0
+#define _TRANS_DP2_CTL_C 0x620a0
+#define _TRANS_DP2_CTL_D 0x630a0
+#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
+#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
+#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
+#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
+
+#define _TRANS_DP2_VFREQHIGH_A 0x600a4
+#define _TRANS_DP2_VFREQHIGH_B 0x610a4
+#define _TRANS_DP2_VFREQHIGH_C 0x620a4
+#define _TRANS_DP2_VFREQHIGH_D 0x630a4
+#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
+
+#define _TRANS_DP2_VFREQLOW_A 0x600a8
+#define _TRANS_DP2_VFREQLOW_B 0x610a8
+#define _TRANS_DP2_VFREQLOW_C 0x620a8
+#define _TRANS_DP2_VFREQLOW_D 0x630a8
+#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
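+
+/*
+ * Example (illustrative sketch): the pixel rate is programmed in Hz as a
+ * 48-bit value, upper 24 bits in VFREQHIGH and lower 24 bits in VFREQLOW,
+ * each placed in bits 31:8 of its register. The helper names and the u64
+ * parameter are hypothetical; the split follows the field definitions
+ * above.
+ */
+static inline u32 trans_dp2_vfreq_high(u64 pixel_clock_hz)
+{
+	return TRANS_DP2_VFREQ_PIXEL_CLOCK((u32)(pixel_clock_hz >> 24));
+}
+
+static inline u32 trans_dp2_vfreq_low(u64 pixel_clock_hz)
+{
+	return TRANS_DP2_VFREQ_PIXEL_CLOCK((u32)(pixel_clock_hz & 0xffffff));
+}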
+
+#define _DP_MIN_HBLANK_CTL_A 0x600ac
+#define _DP_MIN_HBLANK_CTL_B 0x610ac
+#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
+
+#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
+
+/*
+ * HSW - ICL power wells
+ *
+ * Platforms have up to 3 power well control register sets, each set
+ * controlling up to 16 power wells via a request/status HW flag tuple:
+ * - main (HSW_PWR_WELL_CTL[1-4])
+ * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
+ * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
+ * Each control register set consists of up to 4 registers used by different
+ * sources that can request a power well to be enabled:
+ * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
+ * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
+ * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
+ * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
+ */
+#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
+#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
+#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
+#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
+#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
+#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
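+
+/*
+ * Example (illustrative sketch): each power well occupies a request/status
+ * bit pair, so enabling well N means setting the request bit and then
+ * polling the status bit. Bit manipulation only; the hypothetical helpers
+ * stand in for the driver's MMIO and wait machinery.
+ */
+static inline u32 hsw_pwr_well_request(u32 ctl, int pw_idx)
+{
+	return ctl | HSW_PWR_WELL_CTL_REQ(pw_idx);
+}
+
+static inline bool hsw_pwr_well_enabled(u32 ctl, int pw_idx)
+{
+	return ctl & HSW_PWR_WELL_CTL_STATE(pw_idx);
+}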
+
+/* HSW/BDW power well */
+#define HSW_PW_CTL_IDX_GLOBAL 15
+
+/* SKL/BXT/GLK power wells */
+#define SKL_PW_CTL_IDX_PW_2 15
+#define SKL_PW_CTL_IDX_PW_1 14
+#define GLK_PW_CTL_IDX_AUX_C 10
+#define GLK_PW_CTL_IDX_AUX_B 9
+#define GLK_PW_CTL_IDX_AUX_A 8
+#define SKL_PW_CTL_IDX_DDI_D 4
+#define SKL_PW_CTL_IDX_DDI_C 3
+#define SKL_PW_CTL_IDX_DDI_B 2
+#define SKL_PW_CTL_IDX_DDI_A_E 1
+#define GLK_PW_CTL_IDX_DDI_A 1
+#define SKL_PW_CTL_IDX_MISC_IO 0
+
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
+#define ICL_PW_CTL_IDX_PW_4 3
+#define ICL_PW_CTL_IDX_PW_3 2
+#define ICL_PW_CTL_IDX_PW_2 1
+#define ICL_PW_CTL_IDX_PW_1 0
+
+/* XE_LPD - power wells */
+#define XELPD_PW_CTL_IDX_PW_D 8
+#define XELPD_PW_CTL_IDX_PW_C 7
+#define XELPD_PW_CTL_IDX_PW_B 6
+#define XELPD_PW_CTL_IDX_PW_A 5
+
+#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
+#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
+#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
+#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
+#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
+#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
+#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define XELPD_PW_CTL_IDX_AUX_E 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define XELPD_PW_CTL_IDX_AUX_D 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
+#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
+#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
+#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
+#define ICL_PW_CTL_IDX_AUX_C 2
+#define ICL_PW_CTL_IDX_AUX_B 1
+#define ICL_PW_CTL_IDX_AUX_A 0
+
+#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
+#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
+#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define XELPD_PW_CTL_IDX_DDI_E 8
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define XELPD_PW_CTL_IDX_DDI_D 7
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
+#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
+#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
+#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
+#define ICL_PW_CTL_IDX_DDI_C 2
+#define ICL_PW_CTL_IDX_DDI_B 1
+#define ICL_PW_CTL_IDX_DDI_A 0
+
+/* HSW - power well misc debug registers */
+#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
+#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
+
+/* SKL Fuse Status */
+enum skl_power_gate {
+ SKL_PG0,
+ SKL_PG1,
+ SKL_PG2,
+ ICL_PG3,
+ ICL_PG4,
+};
+
+#define SKL_FUSE_STATUS _MMIO(0x42000)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
+#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
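+
+/*
+ * Example (illustrative sketch): fuse download completion and per-PG
+ * distribution status gate power well enabling. A hypothetical check that
+ * PG1 power distribution has completed.
+ */
+static inline bool skl_pg1_distributed(u32 fuse_status)
+{
+	return (fuse_status & SKL_FUSE_DOWNLOAD_STATUS) &&
+	       (fuse_status & SKL_FUSE_PG_DIST_STATUS(SKL_PG1));
+}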
+
+/* Per-pipe DDI Function Control */
+#define _TRANS_DDI_FUNC_CTL_A 0x60400
+#define _TRANS_DDI_FUNC_CTL_B 0x61400
+#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
+#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
+#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
+
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define TRANS_DDI_PORT_SHIFT 28
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
+#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
+
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define CMTG_SECONDARY_MODE REG_BIT(3)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
+
+#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
+#define DISABLE_DPT_CLK_GATING REG_BIT(1)
+
+/* DisplayPort Transport Control */
+#define _DP_TP_CTL_A 0x64040
+#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
+#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
+
+/* DisplayPort Transport Status */
+#define _DP_TP_STATUS_A 0x64044
+#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
+#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
+
+/* DDI Buffer Control */
+#define _DDI_BUF_CTL_A 0x64000
+#define _DDI_BUF_CTL_B 0x64100
+/* Known as DDI_CTL_DE in MTL+ */
+#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE REG_BIT(31)
+#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
+#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
+#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
+#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
+#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
+#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
+#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
+#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
+#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
+#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
+#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
+#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
+ (symbols))
+#define DDI_BUF_IS_IDLE REG_BIT(7)
+#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
+#define DDI_A_4_LANES REG_BIT(4)
+#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
+#define DDI_PORT_WIDTH_SHIFT 1
+#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
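+
+/*
+ * Example (illustrative sketch): the port width field is not linear;
+ * DDI_PORT_WIDTH() maps a lane count of 3 to the raw value 4 and every
+ * other count to count - 1. A hypothetical decode doing the inverse.
+ */
+static inline int ddi_port_width_decode(u32 ddi_buf_ctl)
+{
+	u32 raw = (ddi_buf_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT;
+
+	return raw == 4 ? 3 : raw + 1;
+}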
+
+/* DDI Buffer Translations */
+#define _DDI_BUF_TRANS_A 0x64E00
+#define _DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
+#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE _MMIO(0xC6020)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
+
+/* SPLL */
+#define SPLL_CTL _MMIO(0x46020)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_REF_BCLK (0 << 28)
+#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if enabled in fuses, PCH SSC otherwise */
+#define SPLL_REF_NON_SSC_HSW (2 << 28)
+#define SPLL_REF_PCH_SSC_BDW (2 << 28)
+#define SPLL_REF_LCPLL (3 << 28)
+#define SPLL_REF_MASK (3 << 28)
+#define SPLL_FREQ_810MHz (0 << 26)
+#define SPLL_FREQ_1350MHz (1 << 26)
+#define SPLL_FREQ_2700MHz (2 << 26)
+#define SPLL_FREQ_MASK (3 << 26)
+
+/* WRPLL */
+#define _WRPLL_CTL1 0x46040
+#define _WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_REF_BCLK (0 << 28)
+#define WRPLL_REF_PCH_SSC (1 << 28)
+#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if enabled in fuses, PCH SSC otherwise */
+#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
+#define WRPLL_REF_LCPLL (3 << 28)
+#define WRPLL_REF_MASK (3 << 28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
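+
+/*
+ * Example (illustrative sketch): the WRPLL output frequency is a function
+ * of the selected reference and the R (reference), P (post) and N
+ * (feedback) dividers above; the exact math, including fixed-point
+ * scaling of the dividers, lives in the PLL code. Hypothetical field
+ * extraction helpers.
+ */
+static inline u32 wrpll_decode_r(u32 wrpll_ctl)
+{
+	return wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
+}
+
+static inline u32 wrpll_decode_p(u32 wrpll_ctl)
+{
+	return (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+}
+
+static inline u32 wrpll_decode_n(u32 wrpll_ctl)
+{
+	return (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+}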
+
+/* Port clock selection */
+#define _PORT_CLK_SEL_A 0x46100
+#define _PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
+#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
+#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
+#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
+#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
+#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
+#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
+#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
+#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
+
+/* On ICL+ this is the same register as PORT_CLK_SEL, but the bit layout differs. */
+#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
+#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
+#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
+#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
+#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
+#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
+#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
+#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
+
+/* Transcoder clock selection */
+#define _TRANS_CLK_SEL_A 0x46140
+#define _TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
+
+#define CDCLK_FREQ _MMIO(0x46200)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define _TRANSC_MSA_MISC 0x62410
+#define _TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
+/* See DP_MSA_MISC_* for the bit definitions */
+
+#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
+#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
+#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
+#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
+#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
+#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
+
+/* LCPLL Control */
+#define LCPLL_CTL _MMIO(0x130040)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_REF_NON_SSC (0 << 28)
+#define LCPLL_REF_BCLK (2 << 28)
+#define LCPLL_REF_PCH_SSC (3 << 28)
+#define LCPLL_REF_MASK (3 << 28)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_540_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
+
+/*
+ * SKL Clocks
+ */
+/* CDCLK_CTL */
+#define CDCLK_CTL _MMIO(0x46000)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
+#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* CDCLK_SQUASH_CTL */
+#define CDCLK_SQUASH_CTL _MMIO(0x46008)
+#define CDCLK_SQUASH_ENABLE REG_BIT(31)
+#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
+#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
+#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
+#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
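+
+/*
+ * Example (illustrative sketch): squashing gates CDCLK cycles according to
+ * the 16-bit waveform, so the effective frequency scales with the number
+ * of bits set relative to the window size (typically the full 16 bits).
+ * Hypothetical arithmetic only.
+ */
+static inline u32 cdclk_squashed_khz(u32 cdclk_khz, u16 waveform,
+				     unsigned int window_size)
+{
+	return (u32)((u64)cdclk_khz * hweight16(waveform) / window_size);
+}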
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL _MMIO(0x46010)
+#define LCPLL2_CTL _MMIO(0x46014)
+#define LCPLL_PLL_ENABLE (1 << 31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 _MMIO(0x6C058)
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
+#define DPLL_CTRL1_LINK_RATE_2700 0
+#define DPLL_CTRL1_LINK_RATE_1350 1
+#define DPLL_CTRL1_LINK_RATE_810 2
+#define DPLL_CTRL1_LINK_RATE_1620 3
+#define DPLL_CTRL1_LINK_RATE_1080 4
+#define DPLL_CTRL1_LINK_RATE_2160 5
+
+/* DPLL control2 */
+#define DPLL_CTRL2 _MMIO(0x6C05C)
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
+
+/* DPLL Status */
+#define DPLL_STATUS _MMIO(0x6C060)
+#define DPLL_LOCK(id) (1 << ((id) * 8))
+
+/* DPLL cfg */
+#define _DPLL1_CFGCR1 0x6C040
+#define _DPLL2_CFGCR1 0x6C048
+#define _DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define _DPLL1_CFGCR2 0x6C044
+#define _DPLL2_CFGCR2 0x6C04C
+#define _DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
+#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
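+
+/*
+ * Example (illustrative sketch): the DCO frequency is
+ * ref * (DCO_INTEGER + DCO_FRACTION / 2^15), and the output clock is the
+ * DCO divided by the P/Q/K dividers from CFGCR2. Hypothetical arithmetic
+ * assuming a 24 MHz reference, kept in kHz for integer math.
+ */
+static inline u64 skl_dpll_dco_khz(u32 dco_integer, u32 dco_fraction)
+{
+	const u64 ref_khz = 24000; /* assumed SKL reference clock */
+
+	return ref_khz * dco_integer + (ref_khz * dco_fraction) / 0x8000;
+}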
+
+/* ICL Clocks */
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - TC_PORT_4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
+ (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
+ ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
+/*
+ * DG1 Clocks
+ * The first register controls phys A and B, while the second register
+ * controls phys C and D. The bits in these registers are the same, but
+ * they refer to different phys.
+ */
+#define _DG1_DPCLKA_CFGCR0 0x164280
+#define _DG1_DPCLKA1_CFGCR0 0x16C280
+#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
+#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
+#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
+ _DG1_DPCLKA_CFGCR0, \
+ _DG1_DPCLKA1_CFGCR0)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
+/* ADLS Clocks */
+#define _ADLS_DPCLKA_CFGCR0 0x164280
+#define _ADLS_DPCLKA_CFGCR1 0x1642BC
+#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
+ _ADLS_DPCLKA_CFGCR0, \
+ _ADLS_DPCLKA_CFGCR1)
+#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
+/* ADLS DPCLKA_CFGCR0 DDI mask */
+#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
+#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
+/* ADLS DPCLKA_CFGCR1 DDI mask */
+#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
+#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
+ ADLS_DPCLKA_DDIA_SEL_MASK, \
+ ADLS_DPCLKA_DDIB_SEL_MASK, \
+ ADLS_DPCLKA_DDII_SEL_MASK, \
+ ADLS_DPCLKA_DDIJ_SEL_MASK, \
+ ADLS_DPCLKA_DDIK_SEL_MASK)
+
+/* ICL PLL */
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
+#define _ADLS_DPLL2_ENABLE 0x46018
+#define _ADLS_DPLL3_ENABLE 0x46030
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
+
+#define _DG2_PLL3_ENABLE 0x4601C
+
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
+
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
+#define _MG_PLL1_ENABLE 0x46030
+#define _MG_PLL2_ENABLE 0x46034
+#define _MG_PLL3_ENABLE 0x46038
+#define _MG_PLL4_ENABLE 0x4603C
+/* Bits are the same as _DPLL0_ENABLE */
+#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
+ _MG_PLL2_ENABLE)
+
+/* DG1 PLL */
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
+
+/* ADL-P Type C PLL */
+#define PORTTC1_PLL_ENABLE 0x46038
+#define PORTTC2_PLL_ENABLE 0x46040
+#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
+ PORTTC1_PLL_ENABLE, \
+ PORTTC2_PLL_ENABLE)
+
+#define _ICL_DPLL0_CFGCR0 0x164000
+#define _ICL_DPLL1_CFGCR0 0x164080
+#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
+ _ICL_DPLL1_CFGCR0)
+#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
+#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
+#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
+#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
+#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
+#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
+#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
+#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
+#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
+#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
+#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
+#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
+#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
+
+#define _ICL_DPLL0_CFGCR1 0x164004
+#define _ICL_DPLL1_CFGCR1 0x164084
+#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
+ _ICL_DPLL1_CFGCR1)
+#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
+#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
+#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
+#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
+#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
+#define DPLL_CFGCR1_KDIV_1 (1 << 6)
+#define DPLL_CFGCR1_KDIV_2 (2 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
+#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
+#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR1_PDIV_2 (1 << 2)
+#define DPLL_CFGCR1_PDIV_3 (2 << 2)
+#define DPLL_CFGCR1_PDIV_5 (4 << 2)
+#define DPLL_CFGCR1_PDIV_7 (8 << 2)
+#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
+#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
+
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
+#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0)
+
+#define _TGL_DPLL0_DIV0 0x164B00
+#define _TGL_DPLL1_DIV0 0x164C00
+#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
+#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
+#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1)
+
+#define _DG1_DPLL2_CFGCR0 0x16C284
+#define _DG1_DPLL3_CFGCR0 0x16C28C
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
+
+#define _DG1_DPLL2_CFGCR1 0x16C288
+#define _DG1_DPLL3_CFGCR1 0x16C290
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
+
+/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
+#define _ADLS_DPLL4_CFGCR0 0x164294
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
+
+#define _ADLS_DPLL4_CFGCR1 0x164298
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
+
+/* BXT display engine PLL */
+#define BXT_DE_PLL_CTL _MMIO(0x6d000)
+#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
+#define BXT_DE_PLL_RATIO_MASK 0xff
+
+#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
+#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
+#define BXT_DE_PLL_LOCK (1 << 30)
+#define BXT_DE_PLL_FREQ_REQ (1 << 23)
+#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
+#define ICL_CDCLK_PLL_RATIO(x) (x)
+#define ICL_CDCLK_PLL_RATIO_MASK 0xff
+
+/* GEN9 DC */
+#define DC_STATE_EN _MMIO(0x45504)
+#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
+#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
+#define HOLD_PHY_PG1_LATCH REG_BIT(20)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
+#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
+
+#define DC_STATE_DEBUG _MMIO(0x45520)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
+
+#define D_COMP_BDW _MMIO(0x138144)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP _MMIO(0xc2014)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
+
+/* Gen4+ Timestamp and Pipe Frame time stamp registers */
+#define GEN4_TIMESTAMP _MMIO(0x2358)
+#define ILK_TIMESTAMP_HI _MMIO(0x70070)
+#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FRMTMSTMP_A 0x70048
+#define _PIPE_FRMTMSTMP_B 0x71048
+#define PIPE_FRMTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FLIPTMSTMP_A 0x7004C
+#define _PIPE_FLIPTMSTMP_B 0x7104C
+#define PIPE_FLIPTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
+
+/* tgl+ */
+#define _PIPE_FLIPDONETMSTMP_A 0x70054
+#define _PIPE_FLIPDONETMSTMP_B 0x71054
+#define PIPE_FLIPDONETIMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
+
+#define _VLV_PIPE_MSA_MISC_A 0x70048
+#define VLV_PIPE_MSA_MISC(__display, pipe) \
+ _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
+#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
+
+#define _ICL_PHY_MISC_A 0x64C00
+#define _ICL_PHY_MISC_B 0x64C04
+#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
+#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
+ ICL_PHY_MISC(port))
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
+#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
+
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
+
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+#define _TCSS_DDI_STATUS_1 0x161500
+#define _TCSS_DDI_STATUS_2 0x161504
+#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
+ _TCSS_DDI_STATUS_1, \
+ _TCSS_DDI_STATUS_2))
+#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+#define TCSS_DDI_STATUS_READY REG_BIT(2)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
+
+#define CLKREQ_POLICY _MMIO(0x101038)
+#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+
+#define CLKGATE_DIS_MISC _MMIO(0x46534)
+#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+
+#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
+#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
+#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
+
+#endif /* __INTEL_DISPLAY_REGS_H__ */
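The indexed PLL macros above map a PLL number onto two evenly spaced MMIO ranges: indices below the split point step through the first (a, b) range, the remainder through the second (c, d) range. A minimal userspace sketch of that arithmetic, assuming _PICK_EVEN() computes a + idx * (b - a) as in i915's register-definition helpers, reproducing the TGL_DPLL_CFGCR0 offsets:

#include <stdint.h>
#include <stdio.h>

/* even-spaced lookup within one range */
static uint32_t pick_even(uint32_t idx, uint32_t a, uint32_t b)
{
	return a + idx * (b - a);
}

/* two ranges glued together at a split index */
static uint32_t pick_even_2ranges(uint32_t idx, uint32_t split,
				  uint32_t a, uint32_t b,
				  uint32_t c, uint32_t d)
{
	return idx < split ? pick_even(idx, a, b)
			   : pick_even(idx - split, c, d);
}

int main(void)
{
	/* PLL 0/1 land in the DPLL range, PLL 2 (TBT) in the second range */
	for (uint32_t pll = 0; pll < 3; pll++)
		printf("TGL_DPLL_CFGCR0(%u) = 0x%x\n", pll,
		       pick_even_2ranges(pll, 2, 0x164284, 0x16428C,
					 0x16429C, 0x16429C));
	return 0;
}

This prints 0x164284, 0x16428C and 0x16429C, matching the _TGL_DPLL*_CFGCR0 definitions above.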
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1dbd3e841df3..f5f38dca14d7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "intel_clock_gating.h"
#include "intel_cx0_phy.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_reset.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
index 48da67dd0136..56c4024201c1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rpm.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -2,6 +2,7 @@
/* Copyright © 2025 Intel Corporation */
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_runtime_pm.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 678b24115951..82ea1ec482e4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -9,6 +9,7 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_display_core.h"
#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -45,12 +46,13 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
+ struct intel_display *display = to_intel_display(crtc->dev);
struct wait_rps_boost *wait;
if (!dma_fence_is_i915(fence))
return;
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ if (DISPLAY_VER(display) < 6)
return;
if (drm_crtc_vblank_get(crtc))
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index d6d0440dcee9..ce45261c4a8f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_panel.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
@@ -145,6 +146,8 @@ struct intel_framebuffer {
unsigned int min_alignment;
unsigned int vtd_guard;
+
+ unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width);
};
enum intel_hotplug_state {
@@ -384,6 +387,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Simple drm_panel */
+ struct drm_panel *base;
+
/* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
const struct drm_edid *fixed_edid;
@@ -550,6 +556,10 @@ struct intel_connector {
struct intel_dp *dp;
} mst;
+ struct {
+ int force_bpp_x16;
+ } link;
+
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
@@ -591,7 +601,7 @@ struct intel_atomic_state {
bool dpll_set, modeset;
- struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
+ struct intel_dpll_state dpll_state[I915_NUM_PLLS];
struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels;
@@ -1075,8 +1085,8 @@ struct intel_crtc_state {
* haswell. */
struct dpll dpll;
- /* Selected dpll when shared or NULL. */
- struct intel_shared_dpll *shared_dpll;
+ /* Selected dpll or NULL. */
+ struct intel_dpll *intel_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -1086,7 +1096,7 @@ struct intel_crtc_state {
* setting shared_dpll and dpll_hw_state to one of these reserved ones.
*/
struct icl_port_dpll {
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
struct intel_dpll_hw_state hw_state;
} icl_port_dplls[ICL_PORT_DPLL_COUNT];
@@ -1293,8 +1303,9 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
/* For DSB based pipe updates */
- struct intel_dsb *dsb_color_vblank, *dsb_commit;
+ struct intel_dsb *dsb_color, *dsb_commit;
bool use_dsb;
+ bool use_flipq;
u32 psr2_man_track_ctl;
@@ -1361,6 +1372,21 @@ struct intel_pipe_crc {
enum intel_pipe_crc_source source;
};
+enum intel_flipq_id {
+ INTEL_FLIPQ_PLANE_1,
+ INTEL_FLIPQ_PLANE_2,
+ INTEL_FLIPQ_PLANE_3,
+ INTEL_FLIPQ_GENERAL,
+ INTEL_FLIPQ_FAST,
+ MAX_INTEL_FLIPQ,
+};
+
+struct intel_flipq {
+ u32 start_mmioaddr;
+ enum intel_flipq_id flipq_id;
+ u8 tail;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1387,11 +1413,15 @@ struct intel_crtc {
struct drm_pending_vblank_event *flip_done_event;
/* armed event for DSB based updates */
struct drm_pending_vblank_event *dsb_event;
+ /* armed event for flip queue based updates */
+ struct drm_pending_vblank_event *flipq_event;
/* Access to these should be protected by display->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
+ struct intel_flipq flipq[MAX_INTEL_FLIPQ];
+
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
@@ -1513,6 +1543,8 @@ struct intel_plane {
bool async_flip);
void (*enable_flip_done)(struct intel_plane *plane);
void (*disable_flip_done)(struct intel_plane *plane);
+ /* For drm_panic */
+ void (*disable_tiling)(struct intel_plane *plane);
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -1665,7 +1697,9 @@ struct intel_dp {
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
- u8 pr_dpcd;
+ u8 pr_dpcd[DP_PANEL_REPLAY_CAP_SIZE];
+#define INTEL_PR_DPCD_INDEX(pr_dpcd_register) ((pr_dpcd_register) - DP_PANEL_REPLAY_CAP_SUPPORT)
+
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
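The pr_dpcd change above widens the cached panel-replay capability from a single byte to the whole DPCD block, with INTEL_PR_DPCD_INDEX() translating a DPCD register address into an offset within that cache. A small sketch of the indexing scheme; the base address and block size here are assumptions for illustration, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define DP_PANEL_REPLAY_CAP_SUPPORT	0x0b0	/* assumed block base */
#define DP_PANEL_REPLAY_CAP_SIZE	3	/* assumed block length */

#define INTEL_PR_DPCD_INDEX(reg)	((reg) - DP_PANEL_REPLAY_CAP_SUPPORT)

int main(void)
{
	uint8_t pr_dpcd[DP_PANEL_REPLAY_CAP_SIZE] = { 0x07, 0x00, 0x00 };

	/* the block base maps to index 0, the next register to 1, ... */
	printf("support byte 0x%02x at index %d\n",
	       pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)],
	       INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT));
	return 0;
}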
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index da429c332914..f57280e9d041 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -6,6 +6,7 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_wa.h"
static void gen11_display_wa_apply(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index dad7192132ad..35e919eae369 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -25,7 +25,9 @@ dkl_phy_set_hip_idx(struct intel_display *display, struct intel_dkl_phy_reg reg)
{
enum tc_port tc_port = DKL_REG_TC_PORT(reg);
- drm_WARN_ON(display->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
+ if (drm_WARN_ON(display->drm,
+ tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS))
+ return;
intel_de_write(display,
HIP_INDEX_REG(tc_port),
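The intel_dkl_phy.c fix turns a fire-and-forget warning into a guard: drm_WARN_ON() evaluates to the tested condition, so an out-of-range tc_port now bails out before the HIP index register is programmed. A standalone sketch of the pattern, with WARN_RET standing in for drm_WARN_ON():

#include <stdbool.h>
#include <stdio.h>

#define WARN_RET(cond) \
	({ bool __c = (cond); if (__c) fprintf(stderr, "WARN: %s\n", #cond); __c; })

static void set_hip_idx(int tc_port, int num_ports)
{
	if (WARN_RET(tc_port < 0 || tc_port >= num_ports))
		return;	/* don't program an out-of-range bank index */

	printf("HIP_INDEX_REG <- bank %d\n", tc_port);
}

int main(void)
{
	set_hip_idx(1, 4);	/* programs the bank */
	set_hip_idx(7, 4);	/* warns and bails instead of writing garbage */
	return 0;
}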
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
index 56085b32956d..f8ffeec29e93 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "intel_display_reg_defs.h"
+
struct intel_dkl_phy_reg {
u32 reg:24;
u32 bank_idx:4;
@@ -151,6 +153,7 @@ struct intel_dkl_phy_reg {
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
+#define LOADGEN_SHARING_PMD_DISABLE REG_BIT(12)
#define _DKL_TX_FW_CALIB_LN0 0x02F8
#define _DKL_TX_FW_CALIB_LN1 0x12F8
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index b58189d24e7e..744f51c0eab8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -24,14 +24,22 @@
#include <linux/debugfs.h>
#include <linux/firmware.h>
+#include <drm/drm_vblank.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_rpm.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
+#include "intel_display_rpm.h"
+#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
+#include "intel_flipq.h"
#include "intel_step.h"
/**
@@ -176,7 +184,8 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
const char *fw_path = NULL;
u32 max_fw_size = 0;
- if (DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3002 ||
+ DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -425,29 +434,26 @@ static void disable_event_handler(struct intel_display *display,
REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE));
+ DMC_EVENT_FALSE));
intel_de_write(display, htp_reg, 0);
}
-static void disable_all_event_handlers(struct intel_display *display)
+static void disable_all_event_handlers(struct intel_display *display,
+ enum intel_dmc_id dmc_id)
{
- enum intel_dmc_id dmc_id;
+ int handler;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(display) < 12)
return;
- for_each_dmc_id(dmc_id) {
- int handler;
-
- if (!has_dmc_id_fw(display, dmc_id))
- continue;
+ if (!has_dmc_id_fw(display, dmc_id))
+ return;
- for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
- disable_event_handler(display,
- DMC_EVT_CTL(display, dmc_id, handler),
- DMC_EVT_HTP(display, dmc_id, handler));
- }
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(display,
+ DMC_EVT_CTL(display, dmc_id, handler),
+ DMC_EVT_HTP(display, dmc_id, handler));
}
static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
@@ -479,89 +485,36 @@ static void mtl_pipedmc_clock_gating_wa(struct intel_display *display)
* for pipe A and B.
*/
intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0,
- MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+ MTL_PIPEDMC_GATING_DIS(PIPE_A) |
+ MTL_PIPEDMC_GATING_DIS(PIPE_B));
}
static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
{
- if (DISPLAY_VER(display) >= 14 && enable)
+ if (display->platform.meteorlake && enable)
mtl_pipedmc_clock_gating_wa(display);
else if (DISPLAY_VER(display) == 13)
adlp_pipedmc_clock_gating_wa(display, enable);
}
-void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe)
+static u32 pipedmc_interrupt_mask(struct intel_display *display)
{
- enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
-
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
- return;
-
- if (DISPLAY_VER(display) >= 14)
- intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
- else
- intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
-}
-
-void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
-{
- enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
-
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
- return;
-
- if (DISPLAY_VER(display) >= 14)
- intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
- else
- intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
-}
-
-/**
- * intel_dmc_block_pkgc() - block PKG C-state
- * @display: display instance
- * @pipe: pipe which register use to block
- * @block: block/unblock
- *
- * This interface is target for Wa_16025596647 usage. I.e. to set/clear
- * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in PIPEDMC_BLOCK_PKGC_SW register.
- */
-void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
- bool block)
-{
- intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
- PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
- PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+ /*
+ * FIXME PIPEDMC_ERROR not enabled for now due to LNL pipe B
+ * triggering it during the first DC state transition. Figure
+ * out what is going on...
+ */
+ return PIPEDMC_FLIPQ_PROG_DONE |
+ PIPEDMC_GTT_FAULT |
+ PIPEDMC_ATS_FAULT;
}
-/**
- * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
- * C-state exit
- * @display: display instance
- * @pipe: pipe which register use to block
- * @enable: enable/disable
- *
- * This interface is target for Wa_16025596647 usage. I.e. start the package C
- * exit at the start of the undelayed vblank
- */
-void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
- enum pipe pipe, bool enable)
+static u32 dmc_evt_ctl_disable(void)
{
- u32 val;
-
- if (enable)
- val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_VBLANK_A);
- else
- val = REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE) |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1);
-
- intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe),
- val);
+ return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVENT_FALSE);
}
static bool is_dmc_evt_ctl_reg(struct intel_display *display,
@@ -584,6 +537,15 @@ static bool is_dmc_evt_htp_reg(struct intel_display *display,
return offset >= start && offset < end;
}
+static bool is_event_handler(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ unsigned int event_id,
+ i915_reg_t reg, u32 data)
+{
+ return is_dmc_evt_ctl_reg(display, dmc_id, reg) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -597,12 +559,12 @@ static bool disable_dmc_evt(struct intel_display *display,
/* also disable the flip queue event on the main DMC on TGL */
if (display->platform.tigerlake &&
- REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data))
return true;
/* also disable the HRR event on the main DMC on TGL/ADLS */
if ((display->platform.tigerlake || display->platform.alderlake_s) &&
- REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
return true;
return false;
@@ -615,14 +577,267 @@ static u32 dmc_mmiodata(struct intel_display *display,
if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
- return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE);
+ return dmc_evt_ctl_disable();
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
+static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int i;
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc_mmiodata(display, dmc, dmc_id, i));
+ }
+}
+
+static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int i;
+
+ disable_all_event_handlers(display, dmc_id);
+
+ preempt_disable();
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(display,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
+ }
+
+ preempt_enable();
+
+ dmc_load_mmio(display, dmc_id);
+}
+
+static void assert_dmc_loaded(struct intel_display *display,
+ enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 expected, found;
+ int i;
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0));
+ expected = dmc->dmc_info[dmc_id].payload[0];
+
+ drm_WARN(display->drm, found != expected,
+ "DMC %d program storage start incorrect (expected 0x%x, current 0x%x)\n",
+ dmc_id, expected, found);
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+
+ found = intel_de_read(display, reg);
+ expected = dmc_mmiodata(display, dmc, dmc_id, i);
+
+ /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */
+ if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
+ found &= ~DMC_EVT_CTL_ENABLE;
+ expected &= ~DMC_EVT_CTL_ENABLE;
+ }
+
+ drm_WARN(display->drm, found != expected,
+ "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
+ dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
+ }
+}
+
+void assert_main_dmc_loaded(struct intel_display *display)
+{
+ assert_dmc_loaded(display, DMC_FW_MAIN);
+}
+
+static bool need_pipedmc_load_program(struct intel_display *display)
+{
+ /* On TGL/derivatives pipe DMC state is lost when PG1 is disabled */
+ return DISPLAY_VER(display) == 12;
+}
+
+static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe)
+{
+ /*
+ * PTL:
+ * - pipe A/B DMC doesn't need save/restore
+ * - pipe C/D DMC is in PG0, needs manual save/restore
+ */
+ if (DISPLAY_VER(display) == 30)
+ return pipe >= PIPE_C;
+
+ /*
+ * FIXME LNL unclear, main DMC firmware has the pipe DMC A/B PG0
+ * save/restore, but so far unable to see the loss of pipe DMC state
+ * in action. Are we just failing to turn off PG0 due to some other
+ * SoC level stuff?
+ */
+ if (DISPLAY_VER(display) == 20)
+ return false;
+
+ /*
+ * FIXME BMG untested, main DMC firmware has the
+ * pipe DMC A/B PG0 save/restore...
+ */
+ if (display->platform.battlemage)
+ return false;
+
+ /*
+ * DG2:
+ * - Pipe DMCs presumably in PG0?
+ * - No DC6, and even DC9 doesn't seem to result
+ * in loss of DMC state for whatever reason
+ */
+ if (display->platform.dg2)
+ return false;
+
+ /*
+ * ADL/MTL:
+ * - pipe A/B DMC is in PG0, saved/restored by the main DMC
+ * - pipe C/D DMC is in PG0, needs manual save/restore
+ */
+ if (IS_DISPLAY_VER(display, 13, 14))
+ return pipe >= PIPE_C;
+
+ return false;
+}
+
+static bool can_enable_pipedmc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /*
+ * On TGL/derivatives pipe DMC state is lost when PG1 is disabled.
+ * Do not even enable the pipe DMC when that can happen outside
+ * of driver control (PSR+DC5/6).
+ */
+ if (DISPLAY_VER(display) == 12 && crtc_state->has_psr)
+ return false;
+
+ return true;
+}
+
+void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ if (!can_enable_pipedmc(crtc_state)) {
+ intel_dmc_disable_pipe(crtc_state);
+ return;
+ }
+
+ if (need_pipedmc_load_program(display))
+ dmc_load_program(display, dmc_id);
+ else if (need_pipedmc_load_mmio(display, pipe))
+ dmc_load_mmio(display, dmc_id);
+
+ assert_dmc_loaded(display, dmc_id);
+
+ if (DISPLAY_VER(display) >= 20) {
+ intel_flipq_reset(display, pipe);
+
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
+ intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display));
+ }
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ else
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+}
+
+void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
+ return;
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ else
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+
+ if (DISPLAY_VER(display) >= 20) {
+ intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0);
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
+
+ intel_flipq_reset(display, pipe);
+ }
+}
+
+static void dmc_configure_event(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ unsigned int event_id,
+ bool enable)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ int num_handlers = 0;
+ int i;
+
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
+ u32 data = dmc->dmc_info[dmc_id].mmiodata[i];
+
+ if (!is_event_handler(display, dmc_id, event_id, reg, data))
+ continue;
+
+ intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+ num_handlers++;
+ }
+
+ drm_WARN_ONCE(display->drm, num_handlers != 1,
+ "DMC %d has %d handlers for event 0x%x\n",
+ dmc_id, num_handlers, event_id);
+}
+
+/**
+ * intel_dmc_block_pkgc() - block PKG C-state
+ * @display: display instance
+ * @pipe: pipe whose register is used to block
+ * @block: block/unblock
+ *
+ * This interface is intended for Wa_16025596647 usage, i.e. to set/clear the
+ * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW register.
+ */
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block)
+{
+ intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+}
+
+/**
+ * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
+ * C-state exit
+ * @display: display instance
+ * @pipe: pipe whose DMC event is configured
+ * @enable: enable/disable
+ *
+ * This interface is intended for Wa_16025596647 usage, i.e. to start the
+ * package C-state exit at the start of the undelayed vblank.
+ */
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @display: display instance
@@ -634,37 +849,26 @@ static u32 dmc_mmiodata(struct intel_display *display,
void intel_dmc_load_program(struct intel_display *display)
{
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
- u32 i;
if (!intel_dmc_has_payload(display))
return;
- pipedmc_clock_gating_wa(display, true);
-
- disable_all_event_handlers(display);
-
assert_display_rpm_held(display);
- preempt_disable();
+ pipedmc_clock_gating_wa(display, true);
for_each_dmc_id(dmc_id) {
- for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
- intel_de_write_fw(display,
- DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
- dmc->dmc_info[dmc_id].payload[i]);
- }
+ dmc_load_program(display, dmc_id);
+ assert_dmc_loaded(display, dmc_id);
}
- preempt_enable();
-
- for_each_dmc_id(dmc_id) {
- for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
- intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
- dmc_mmiodata(display, dmc, dmc_id, i));
- }
- }
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, DMC_FQ_W2_PTS_CFG_SEL,
+ PIPE_D_DMC_W2_PTS_CONFIG_SELECT(PIPE_D) |
+ PIPE_C_DMC_W2_PTS_CONFIG_SELECT(PIPE_C) |
+ PIPE_B_DMC_W2_PTS_CONFIG_SELECT(PIPE_B) |
+ PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A));
power_domains->dc_state = 0;
@@ -682,26 +886,17 @@ void intel_dmc_load_program(struct intel_display *display)
*/
void intel_dmc_disable_program(struct intel_display *display)
{
+ enum intel_dmc_id dmc_id;
+
if (!intel_dmc_has_payload(display))
return;
pipedmc_clock_gating_wa(display, true);
- disable_all_event_handlers(display);
- pipedmc_clock_gating_wa(display, false);
-}
-void assert_dmc_loaded(struct intel_display *display)
-{
- struct intel_dmc *dmc = display_to_dmc(display);
+ for_each_dmc_id(dmc_id)
+ disable_all_event_handlers(display, dmc_id);
- drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n");
- drm_WARN_ONCE(display->drm, dmc &&
- !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
- "DMC program storage start is NULL\n");
- drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE),
- "DMC SSP Base Not fine\n");
- drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL),
- "DMC HTP Not fine\n");
+ pipedmc_clock_gating_wa(display, false);
}
static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
@@ -1120,7 +1315,6 @@ out:
*/
void intel_dmc_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc;
if (!HAS_DMC(display))
@@ -1163,7 +1357,7 @@ void intel_dmc_init(struct intel_display *display)
display->dmc.dmc = dmc;
drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path);
- queue_work(i915->unordered_wq, &dmc->work);
+ queue_work(display->wq.unordered, &dmc->work);
return;
@@ -1194,6 +1388,17 @@ void intel_dmc_suspend(struct intel_display *display)
intel_dmc_runtime_pm_put(display);
}
+void intel_dmc_wait_fw_load(struct intel_display *display)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+
+ if (!HAS_DMC(display))
+ return;
+
+ if (dmc)
+ flush_work(&dmc->work);
+}
+
/**
* intel_dmc_resume() - init DMC firmware during system resume
* @display: display instance
@@ -1403,3 +1608,73 @@ void intel_dmc_debugfs_register(struct intel_display *display)
debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
display, &intel_dmc_debugfs_status_fops);
}
+
+void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ u32 tmp = 0, int_vector;
+
+ if (DISPLAY_VER(display) >= 20) {
+ tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe));
+ intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp);
+
+ if (tmp & PIPEDMC_FLIPQ_PROG_DONE) {
+ spin_lock(&display->drm->event_lock);
+
+ if (crtc->flipq_event) {
+ /*
+ * Update vblank counter/timestamp in case it
+ * hasn't been done yet for this frame.
+ */
+ drm_crtc_accurate_vblank_count(&crtc->base);
+
+ drm_crtc_send_vblank_event(&crtc->base, crtc->flipq_event);
+ crtc->flipq_event = NULL;
+ }
+
+ spin_unlock(&display->drm->event_lock);
+ }
+
+ if (tmp & PIPEDMC_ATS_FAULT)
+ drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+ if (tmp & PIPEDMC_GTT_FAULT)
+ drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n",
+ crtc->base.base.id, crtc->base.name);
+ if (tmp & PIPEDMC_ERROR)
+ drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC error\n",
+ crtc->base.base.id, crtc->base.name);
+ }
+
+ int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK;
+ if (tmp == 0 && int_vector != 0)
+ drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC interrupt vector 0x%x\n",
+ crtc->base.base.id, crtc->base.name, tmp);
+}
+
+void intel_pipedmc_enable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ dmc_configure_event(display, dmc_id, event, true);
+}
+
+void intel_pipedmc_disable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ dmc_configure_event(display, dmc_id, event, false);
+}
+
+u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dmc *dmc = display_to_dmc(display);
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
+
+ return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0;
+}
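dmc_configure_event() above toggles a single handler by scanning the firmware's mmio write list for DMC_EVT_CTL entries whose event-id field matches, then writing either the firmware value or a disable value. A userspace model of that scan; only the event-id field (bits 15:8, as in DMC_EVT_CTL_EVENT_ID_MASK) is modelled, the disable value and addresses are illustrative, and event 0x32 matches MAINDMC_EVENT_VBLANK_A from this patch:

#include <stdint.h>
#include <stdio.h>

#define EVENT_ID_MASK	0xff00u			/* DMC_EVT_CTL_EVENT_ID_MASK */
#define EVENT_ID(data)	(((data) & EVENT_ID_MASK) >> 8)
#define EVT_CTL_DISABLE	(0x01u << 8)		/* "false" event, illustrative */

struct mmio_write { uint32_t addr, data; };	/* one firmware EVT_CTL entry */

static uint32_t regs[2];			/* stand-in for live registers */

static void configure_event(const struct mmio_write *fw, int n,
			    unsigned int event_id, int enable)
{
	int handlers = 0;

	for (int i = 0; i < n; i++) {
		if (EVENT_ID(fw[i].data) != event_id)
			continue;
		/* restore the firmware value, or neuter the handler */
		regs[i] = enable ? fw[i].data : EVT_CTL_DISABLE;
		handlers++;
	}

	if (handlers != 1)
		fprintf(stderr, "expected 1 handler for 0x%x, found %d\n",
			event_id, handlers);
}

int main(void)
{
	const struct mmio_write fw[] = {
		{ 0x5f044, 0x80003200 },	/* vblank handler, event 0x32 */
		{ 0x5f048, 0x8000bf00 },	/* 1 kHz handler, event 0xbf */
	};

	configure_event(fw, 2, 0x32, 0);	/* disable the vblank handler */
	printf("reg[0] = 0x%08x\n", regs[0]);	/* 0x00000100 */
	return 0;
}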
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index bd1c459b0075..40e9dcb033cc 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -9,15 +9,19 @@
#include <linux/types.h>
enum pipe;
+enum pipedmc_event_id;
struct drm_printer;
+struct intel_crtc;
+struct intel_crtc_state;
struct intel_display;
struct intel_dmc_snapshot;
void intel_dmc_init(struct intel_display *display);
void intel_dmc_load_program(struct intel_display *display);
+void intel_dmc_wait_fw_load(struct intel_display *display);
void intel_dmc_disable_program(struct intel_display *display);
-void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
-void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state);
+void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state);
void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
bool block);
void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
@@ -32,6 +36,16 @@ struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *disp
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
-void assert_dmc_loaded(struct intel_display *display);
+void assert_main_dmc_loaded(struct intel_display *display);
+
+void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe);
+
+u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc);
+void intel_pipedmc_enable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event);
+void intel_pipedmc_disable_event(struct intel_crtc *crtc,
+ enum pipedmc_event_id event);
#endif /* __INTEL_DMC_H__ */
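intel_dmc_wait_fw_load() added above is just a flush of the firmware-loading work item, letting callers synchronize with the asynchronous load kicked off at init. A toy pthread model of the same flush semantics (the kernel side is flush_work(&dmc->work)):

#include <pthread.h>
#include <stdio.h>

static int fw_loaded;
static pthread_t loader;

static void *load_fw(void *arg)
{
	(void)arg;
	fw_loaded = 1;		/* parse and stash the firmware payload */
	return NULL;
}

static void wait_fw_load(void)
{
	pthread_join(loader, NULL);	/* kernel: flush_work(&dmc->work) */
}

int main(void)
{
	pthread_create(&loader, NULL, load_fw, NULL);
	wait_fw_load();			/* guaranteed to see the load done */
	printf("fw_loaded = %d\n", fw_loaded);
	return 0;
}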
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index e16ea3f16ed8..c5aa49921cb9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -6,7 +6,273 @@
#ifndef __INTEL_DMC_REGS_H__
#define __INTEL_DMC_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
+
+enum dmc_event_id {
+ DMC_EVENT_TRUE = 0x0,
+ DMC_EVENT_FALSE = 0x1,
+};
+
+enum maindmc_event_id {
+ MAINDMC_EVENT_CMP_ZERO = 0x8,
+ MAINDMC_EVENT_CMP_ODD = 0x9,
+ MAINDMC_EVENT_CMP_NEG = 0xa,
+ MAINDMC_EVENT_CMP_CARRY = 0xb,
+
+ MAINDMC_EVENT_TMR0_DONE = 0x14,
+ MAINDMC_EVENT_TMR1_DONE = 0x15,
+ MAINDMC_EVENT_TMR2_DONE = 0x16,
+ MAINDMC_EVENT_COUNT0_DONE = 0x17,
+ MAINDMC_EVENT_COUNT1_DONE = 0x18,
+ MAINDMC_EVENT_PERF_CNTR_DARBF = 0x19,
+
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_A_TRIGGER = 0x22,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_B_TRIGGER = 0x23,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_C_TRIGGER = 0x24,
+ MAINDMC_EVENT_SCANLINE_INRANGE_FQ_D_TRIGGER = 0x25,
+ MAINDMC_EVENT_1KHZ_FQ_A_TRIGGER = 0x26,
+ MAINDMC_EVENT_1KHZ_FQ_B_TRIGGER = 0x27,
+ MAINDMC_EVENT_1KHZ_FQ_C_TRIGGER = 0x28,
+ MAINDMC_EVENT_1KHZ_FQ_D_TRIGGER = 0x29,
+ MAINDMC_EVENT_SCANLINE_COMP_A = 0x2a,
+ MAINDMC_EVENT_SCANLINE_COMP_B = 0x2b,
+ MAINDMC_EVENT_SCANLINE_COMP_C = 0x2c,
+ MAINDMC_EVENT_SCANLINE_COMP_D = 0x2d,
+ MAINDMC_EVENT_VBLANK_DELAYED_A = 0x2e,
+ MAINDMC_EVENT_VBLANK_DELAYED_B = 0x2f,
+ MAINDMC_EVENT_VBLANK_DELAYED_C = 0x30,
+ MAINDMC_EVENT_VBLANK_DELAYED_D = 0x31,
+ MAINDMC_EVENT_VBLANK_A = 0x32,
+ MAINDMC_EVENT_VBLANK_B = 0x33,
+ MAINDMC_EVENT_VBLANK_C = 0x34,
+ MAINDMC_EVENT_VBLANK_D = 0x35,
+ MAINDMC_EVENT_HBLANK_A = 0x36,
+ MAINDMC_EVENT_HBLANK_B = 0x37,
+ MAINDMC_EVENT_HBLANK_C = 0x38,
+ MAINDMC_EVENT_HBLANK_D = 0x39,
+ MAINDMC_EVENT_VSYNC_A = 0x3a,
+ MAINDMC_EVENT_VSYNC_B = 0x3b,
+ MAINDMC_EVENT_VSYNC_C = 0x3c,
+ MAINDMC_EVENT_VSYNC_D = 0x3d,
+ MAINDMC_EVENT_SCANLINE_A = 0x3e,
+ MAINDMC_EVENT_SCANLINE_B = 0x3f,
+ MAINDMC_EVENT_SCANLINE_C = 0x40,
+ MAINDMC_EVENT_SCANLINE_D = 0x41,
+
+ MAINDMC_EVENT_PLANE1_FLIP_A = 0x42,
+ MAINDMC_EVENT_PLANE2_FLIP_A = 0x43,
+ MAINDMC_EVENT_PLANE3_FLIP_A = 0x44,
+ MAINDMC_EVENT_PLANE4_FLIP_A = 0x45,
+ MAINDMC_EVENT_PLANE5_FLIP_A = 0x46,
+ MAINDMC_EVENT_PLANE6_FLIP_A = 0x47,
+ MAINDMC_EVENT_PLANE7_FLIP_A = 0x48,
+ MAINDMC_EVENT_PLANE1_FLIP_B = 0x49,
+ MAINDMC_EVENT_PLANE2_FLIP_B = 0x4a,
+ MAINDMC_EVENT_PLANE3_FLIP_B = 0x4b,
+ MAINDMC_EVENT_PLANE4_FLIP_B = 0x4c,
+ MAINDMC_EVENT_PLANE5_FLIP_B = 0x4d,
+ MAINDMC_EVENT_PLANE6_FLIP_B = 0x4e,
+ MAINDMC_EVENT_PLANE7_FLIP_B = 0x4f,
+ MAINDMC_EVENT_PLANE1_FLIP_C = 0x50,
+ MAINDMC_EVENT_PLANE2_FLIP_C = 0x51,
+ MAINDMC_EVENT_PLANE3_FLIP_C = 0x52,
+ MAINDMC_EVENT_PLANE4_FLIP_C = 0x53,
+ MAINDMC_EVENT_PLANE5_FLIP_C = 0x54,
+ MAINDMC_EVENT_PLANE6_FLIP_C = 0x55,
+ MAINDMC_EVENT_PLANE7_FLIP_C = 0x56,
+ MAINDMC_EVENT_PLANE1_FLIP_D = 0x57,
+ MAINDMC_EVENT_PLANE2_FLIP_D = 0x58,
+ MAINDMC_EVENT_PLANE3_FLIP_D = 0x59,
+ MAINDMC_EVENT_PLANE4_FLIP_D = 0x5a,
+ MAINDMC_EVENT_PLANE5_FLIP_D = 0x5b,
+ MAINDMC_EVENT_PLANE6_FLIP_D = 0x5c,
+ MAINDMC_EVENT_PLANE7_FLIP_D = 0x5d,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_A = 0x5e,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_A = 0x5f,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_A = 0x60,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_A = 0x61,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_A = 0x62,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_A = 0x63,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_A = 0x64,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_B = 0x65,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_B = 0x66,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_B = 0x67,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_B = 0x68,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_B = 0x69,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_B = 0x6a,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_B = 0x6b,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_C = 0x6c,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_C = 0x6d,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_C = 0x6e,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_C = 0x6f,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_C = 0x70,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_C = 0x71,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_C = 0x72,
+ MAINDMC_EVENT_PLANE1_FLIP_DONE_D = 0x73,
+ MAINDMC_EVENT_PLANE2_FLIP_DONE_D = 0x74,
+ MAINDMC_EVENT_PLANE3_FLIP_DONE_D = 0x75,
+ MAINDMC_EVENT_PLANE4_FLIP_DONE_D = 0x76,
+ MAINDMC_EVENT_PLANE5_FLIP_DONE_D = 0x77,
+ MAINDMC_EVENT_PLANE6_FLIP_DONE_D = 0x78,
+ MAINDMC_EVENT_PLANE7_FLIP_DONE_D = 0x79,
+
+ MAINDMC_EVENT_WIDI_GTT_FAULT_SL1 = 0x7d,
+ MAINDMC_EVENT_WIDI_GTT_FAULT_SL2 = 0x7e,
+ MAINDMC_EVENT_WIDI_CAP_ACTIVE_SL1 = 0x7f,
+ MAINDMC_EVENT_WIDI_CAP_ACTIVE_SL2 = 0x80,
+
+ MAINDMC_EVENT_RENUKE_A = 0x85,
+ MAINDMC_EVENT_RENUKE_B = 0x86,
+ MAINDMC_EVENT_RENUKE_C = 0x87,
+ MAINDMC_EVENT_RENUKE_D = 0x88,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_A = 0x89,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_B = 0x8a,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_C = 0x8b,
+ MAINDMC_EVENT_DPFC_FIFO_FULL_D = 0x8c,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_A = 0x8d,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_B = 0x8e,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_C = 0x8f,
+ MAINDMC_EVENT_DPFC_PIXEL_CNT_MISMATCH_D = 0x90,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_A = 0x91,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_B = 0x92,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_C = 0x93,
+ MAINDMC_EVENT_DPFC_COMPTAG_UNDERRUN_D = 0x94,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_A = 0x95,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_B = 0x96,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_C = 0x97,
+ MAINDMC_EVENT_DPFC_FIFO_NOT_EMPTY_D = 0x98,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_A = 0x99,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_B = 0x9a,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_C = 0x9b,
+ MAINDMC_EVENT_DPFC_COMPTAG_MISMATCH_D = 0x9c,
+ MAINDMC_EVENT_DISP_PCH_INT = 0x9d,
+ MAINDMC_EVENT_GTT_ERR = 0x9e,
+ MAINDMC_EVENT_VTD_ERR = 0x9f,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_A = 0xa0,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_B = 0xa1,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_C = 0xa2,
+ MAINDMC_EVENT_FULL_FQ_WAKE_TRIGGER_D = 0xa3,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_A = 0xa4,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_B = 0xa5,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_C = 0xa6,
+ MAINDMC_EVENT_PIPEDMC_CHICKEN_FW_EVENT_D = 0xa7,
+
+ MAINDMC_EVENT_DC_CLOCK_OFF_START_EDP = 0xb2,
+ MAINDMC_EVENT_DC_CLOCK_OFF_START_DSI = 0xb3,
+ MAINDMC_EVENT_DCPR_DMC_CSR_START = 0xb4,
+ MAINDMC_EVENT_IN_PSR = 0xb5,
+
+ MAINDMC_EVENT_IN_MEMUP = 0xb7,
+ MAINDMC_EVENT_IN_VGA = 0xb8,
+
+ MAINDMC_EVENT_IN_KVM_SESSION = 0xba,
+ MAINDMC_EVENT_DEWAKE = 0xbb,
+
+ MAINDMC_EVENT_TRAP_HIT = 0xbd,
+ MAINDMC_EVENT_CLK_USEC = 0xbe,
+ MAINDMC_EVENT_CLK_MSEC = 0xbf,
+
+ MAINDMC_EVENT_CHICKEN1 = 0xc8,
+ MAINDMC_EVENT_CHICKEN2 = 0xc9,
+ MAINDMC_EVENT_CHICKEN3 = 0xca,
+ MAINDMC_EVENT_DDT_UBP = 0xcb,
+
+ MAINDMC_EVENT_HP_LATENCY = 0xcd,
+ MAINDMC_EVENT_LP_LATENCY = 0xce,
+ MAINDMC_EVENT_WIDI_LP_REQ_SL1 = 0xcf,
+ MAINDMC_EVENT_WIDI_LP_REQ_SL2 = 0xd0,
+
+ MAINDMC_EVENT_DG_DMC_EVT_0 = 0xd3,
+ MAINDMC_EVENT_DG_DMC_EVT_1 = 0xd4,
+ MAINDMC_EVENT_DG_DMC_EVT_2 = 0xd5,
+ MAINDMC_EVENT_DG_DMC_EVT_3 = 0xd6,
+ MAINDMC_EVENT_DG_DMC_EVT_4 = 0xd7,
+ MAINDMC_EVENT_DACFE_CLK_STOP = 0xd8,
+ MAINDMC_EVENT_DACFE_AZILIA_SDI_WAKE = 0xd9,
+ MAINDMC_EVENT_AUDIO_DOUBLE_FUNC_GRP_RST = 0xda,
+ MAINDMC_EVENT_AUDIO_CMD_VALID = 0xdb,
+ MAINDMC_EVENT_AUDIO_FRM_SYNC_BCLK = 0xdc,
+ MAINDMC_EVENT_AUDIO_FRM_SYNC_CDCLK = 0xdd,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_A = 0xde,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_B = 0xdf,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_C = 0xe0,
+ MAINDMC_EVENT_AUDIO_PRESENCE_DETECT_E = 0xe1,
+ MAINDMC_EVENT_CMTG_SCANLINE_IN_GB_DC6v = 0xe2,
+ MAINDMC_EVENT_DCPR_CMTG_SCANLINE_OUTSIDE_GB = 0xe3,
+ MAINDMC_EVENT_DC6v_BACKWARD_COMPAT = 0xe4,
+ MAINDMC_EVENT_DPMA_PM_ABORT = 0xe5,
+
+ MAINDMC_EVENT_STACK_OVF = 0xfc,
+ MAINDMC_EVENT_NO_CLAIM = 0xfd,
+ MAINDMC_EVENT_UNK_CMD = 0xfe,
+ MAINDMC_EVENT_HTP_MOD = 0xff,
+};
+
+enum pipedmc_event_id {
+ PIPEDMC_EVENT_TMR0_DONE = 0x14,
+ PIPEDMC_EVENT_TMR1_DONE = 0x15,
+ PIPEDMC_EVENT_TMR2_DONE = 0x16,
+ PIPEDMC_EVENT_COUNT0_DONE = 0x17,
+ PIPEDMC_EVENT_COUNT1_DONE = 0x18,
+ PIPEDMC_EVENT_PGA_PGB_RESTORE_DONE = 0x19,
+ PIPEDMC_EVENT_PG1_PG2_RESTORE_DONE = 0x1a,
+ PIPEDMC_EVENT_PGA_PGB_SAVE_DONE = 0x1b,
+ PIPEDMC_EVENT_PG1_PG2_SAVE_DONE = 0x1c,
+
+ PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER = 0x2b,
+ PIPEDMC_EVENT_1KHZ_FQ_TRIGGER = 0x2c,
+ PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER = 0x2d,
+ PIPEDMC_EVENT_SCANLINE_INRANGE = 0x2e,
+ PIPEDMC_EVENT_SCANLINE_OUTRANGE = 0x2f,
+ PIPEDMC_EVENT_SCANLINE_EQUAL = 0x30,
+ PIPEDMC_EVENT_DELAYED_VBLANK = 0x31,
+ PIPEDMC_EVENT_VBLANK = 0x32,
+ PIPEDMC_EVENT_HBLANK = 0x33,
+ PIPEDMC_EVENT_VSYNC = 0x34,
+ PIPEDMC_EVENT_SCANLINE_FROM_DMUX = 0x35,
+ PIPEDMC_EVENT_PLANE1_FLIP = 0x36,
+ PIPEDMC_EVENT_PLANE2_FLIP = 0x37,
+ PIPEDMC_EVENT_PLANE3_FLIP = 0x38,
+ PIPEDMC_EVENT_PLANE4_FLIP = 0x39,
+ PIPEDMC_EVENT_PLANE5_FLIP = 0x3a,
+ PIPEDMC_EVENT_PLANE6_FLIP = 0x3b,
+ PIPEDMC_EVENT_PLANE7_FLIP = 0x3c,
+ PIPEDMC_EVENT_ADAPTIVE_DCB_TRIGGER = 0x3d,
+
+ PIPEDMC_EVENT_PLANE1_FLIP_DONE = 0x56,
+ PIPEDMC_EVENT_PLANE2_FLIP_DONE = 0x57,
+ PIPEDMC_EVENT_PLANE3_FLIP_DONE = 0x58,
+ PIPEDMC_EVENT_PLANE4_FLIP_DONE = 0x59,
+ PIPEDMC_EVENT_PLANE5_FLIP_DONE = 0x5a,
+ PIPEDMC_EVENT_PLANE6_FLIP_DONE = 0x5b,
+ PIPEDMC_EVENT_PLANE7_FLIP_DONE = 0x5c,
+
+ PIPEDMC_EVENT_GTT_ERR = 0x9b,
+
+ PIPEDMC_EVENT_IN_PSR = 0xb5,
+ PIPEDMC_EVENT_DSI_DMC_IDLE = 0xb6,
+ PIPEDMC_EVENT_PSR2_DMC_IDLE = 0xb7,
+ PIPEDMC_EVENT_IN_VGA = 0xb8,
+
+ PIPEDMC_EVENT_TRAP_HIT = 0xbd,
+ PIPEDMC_EVENT_CLK_USEC = 0xbe,
+ PIPEDMC_EVENT_CLK_MSEC = 0xbf,
+
+ PIPEDMC_EVENT_CHICKEN1 = 0xc8,
+ PIPEDMC_EVENT_CHICKEN2 = 0xc9,
+ PIPEDMC_EVENT_CHICKEN3 = 0xca,
+ PIPEDMC_EVENT_DDT_UBP = 0xcb,
+
+ PIPEDMC_EVENT_LP_LATENCY = 0xce,
+
+ PIPEDMC_EVENT_LACE_PART_A_HIST_TRIGGER = 0xdf,
+ PIPEDMC_EVENT_LACE_PART_B_HIST_TRIGGER = 0xe0,
+
+ PIPEDMC_EVENT_STACK_OVF = 0xfc,
+ PIPEDMC_EVENT_NO_CLAIM = 0xfd,
+ PIPEDMC_EVENT_UNK_CMD = 0xfe,
+ PIPEDMC_EVENT_HTP_MOD = 0xff,
+};
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
@@ -21,11 +287,170 @@
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
-#define _MTL_PIPEDMC_EVT_CTL_4_A 0x5f044
-#define _MTL_PIPEDMC_EVT_CTL_4_B 0x5f444
-#define MTL_PIPEDMC_EVT_CTL_4(pipe) _MMIO_PIPE(pipe, \
- _MTL_PIPEDMC_EVT_CTL_4_A, \
- _MTL_PIPEDMC_EVT_CTL_4_B)
+#define _PIPEDMC_LOAD_HTP_A 0x5f000
+#define _PIPEDMC_LOAD_HTP_B 0x5f400
+#define PIPEDMC_LOAD_HTP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_LOAD_HTP_A, _PIPEDMC_LOAD_HTP_B)
+
+#define _PIPEDMC_CTL_A 0x5f064
+#define _PIPEDMC_CTL_B 0x5f464
+#define PIPEDMC_CTL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_CTL_A, _PIPEDMC_CTL_B)
+#define PIPEDMC_HALT REG_BIT(31)
+#define PIPEDMC_STEP REG_BIT(27)
+#define PIPEDMC_CLOCKGATE REG_BIT(23)
+
+#define _PIPEDMC_STATUS_A 0x5f06c
+#define _PIPEDMC_STATUS_B 0x5f46c
+#define PIPEDMC_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_STATUS_A, _PIPEDMC_STATUS_B)
+#define PIPEDMC_SSP REG_GENMASK(31, 16)
+#define PIPEDMC_INT_VECTOR_MASK REG_GENMASK(15, 8)
+/* PIPEDMC_INT_VECTOR values defined by firmware */
+#define PIPEDMC_INT_VECTOR_SCANLINE_COMP_ERROR REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0x1)
+#define PIPEDMC_INT_VECTOR_DC6V_FLIPQ_OVERLAP_ERROR REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0x2)
+#define PIPEDMC_INT_VECTOR_FLIPQ_PROG_DONE REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0xff) /* Wa_16018781658:lnl[a0] */
+#define PIPEDMC_EVT_PENDING REG_GENMASK(7, 0)
+
+#define _PIPEDMC_FQ_CTRL_A 0x5f078
+#define _PIPEDMC_FQ_CTRL_B 0x5f478
+#define PIPEDMC_FQ_CTRL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_CTRL_A, _PIPEDMC_FQ_CTRL_B)
+#define PIPEDMC_FQ_CTRL_ENABLE REG_BIT(31)
+#define PIPEDMC_FQ_CTRL_ASYNC REG_BIT(29)
+#define PIPEDMC_FQ_CTRL_PREEMPT REG_BIT(0)
+
+#define _PIPEDMC_FQ_STATUS_A 0x5f098
+#define _PIPEDMC_FQ_STATUS_B 0x5f498
+#define PIPEDMC_FQ_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_STATUS_A, _PIPEDMC_FQ_STATUS_B)
+#define PIPEDMC_FQ_STATUS_BUSY REG_BIT(31)
+#define PIPEDMC_FQ_STATUS_W2_LIVE_STATUS REG_BIT(1)
+#define PIPEDMC_FQ_STATUS_W1_LIVE_STATUS REG_BIT(0)
+
+#define _PIPEDMC_FPQ_ATOMIC_TP_A 0x5f0a0
+#define _PIPEDMC_FPQ_ATOMIC_TP_B 0x5f4a0
+#define PIPEDMC_FPQ_ATOMIC_TP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_ATOMIC_TP_A, _PIPEDMC_FPQ_ATOMIC_TP_B)
+#define PIPEDMC_FPQ_PLANEQ_3_TP_MASK REG_GENMASK(31, 26)
+#define PIPEDMC_FPQ_PLANEQ_3_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, (tail))
+#define PIPEDMC_FPQ_PLANEQ_2_TP_MASK REG_GENMASK(24, 19)
+#define PIPEDMC_FPQ_PLANEQ_2_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, (tail))
+#define PIPEDMC_FPQ_PLANEQ_1_TP_MASK REG_GENMASK(17, 12)
+#define PIPEDMC_FPQ_PLANEQ_1_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, (tail))
+#define PIPEDMC_FPQ_FASTQ_TP_MASK REG_GENMASK(10, 6)
+#define PIPEDMC_FPQ_FASTQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_FASTQ_TP_MASK, (tail))
+#define PIPEDMC_FPQ_GENERALQ_TP_MASK REG_GENMASK(4, 0)
+#define PIPEDMC_FPQ_GENERALQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_GENERALQ_TP_MASK, (tail))
+
+#define _PIPEDMC_FPQ_LINES_TO_W1_A 0x5f0a4
+#define _PIPEDMC_FPQ_LINES_TO_W1_B 0x5f4a4
+#define PIPEDMC_FPQ_LINES_TO_W1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W1_A, _PIPEDMC_FPQ_LINES_TO_W1_B)
+
+#define _PIPEDMC_FPQ_LINES_TO_W2_A 0x5f0a8
+#define _PIPEDMC_FPQ_LINES_TO_W2_B 0x5f4a8
+#define PIPEDMC_FPQ_LINES_TO_W2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W2_A, _PIPEDMC_FPQ_LINES_TO_W2_B)
+
+#define _PIPEDMC_SCANLINECMP_A 0x5f11c
+#define _PIPEDMC_SCANLINECMP_B 0x5f51c
+#define PIPEDMC_SCANLINECMP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMP_A, _PIPEDMC_SCANLINECMP_B)
+#define PIPEDMC_SCANLINECMP_EN REG_BIT(31)
+#define PIPEDMC_SCANLINE_NUMBER REG_GENMASK(20, 0)
+
+#define _PIPEDMC_SCANLINECMPLOWER_A 0x5f120
+#define _PIPEDMC_SCANLINECMPLOWER_B 0x5f520
+#define PIPEDMC_SCANLINECMPLOWER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPLOWER_A, _PIPEDMC_SCANLINECMPLOWER_B)
+#define PIPEDMC_SCANLINEINRANGECMP_EN REG_BIT(31)
+#define PIPEDMC_SCANLINEOUTRANGECMP_EN REG_BIT(30)
+#define PIPEDMC_SCANLINE_LOWER_MASK REG_GENMASK(20, 0)
+#define PIPEDMC_SCANLINE_LOWER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_LOWER_MASK, (scanline))
+
+#define _PIPEDMC_SCANLINECMPUPPER_A 0x5f124
+#define _PIPEDMC_SCANLINECMPUPPER_B 0x5f524
+#define PIPEDMC_SCANLINECMPUPPER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPUPPER_A, _PIPEDMC_SCANLINECMPUPPER_B)
+#define PIPEDMC_SCANLINE_UPPER_MASK REG_GENMASK(20, 0)
+#define PIPEDMC_SCANLINE_UPPER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_UPPER_MASK, (scanline))
+
+#define _MMIO_PIPEDMC_FPQ(pipe, fq_id, \
+ reg_fpq1_a, reg_fpq2_a, reg_fpq3_a, reg_fpq4_a, \
+ reg_fpq1_b, reg_fpq2_b, reg_fpq3_b, reg_fpq4_b) \
+ _MMIO(_PICK_EVEN_2RANGES((fq_id), INTEL_FLIPQ_PLANE_3, \
+ _PIPE((pipe), (reg_fpq1_a), (reg_fpq1_b)), \
+ _PIPE((pipe), (reg_fpq2_a), (reg_fpq2_b)), \
+ _PIPE((pipe), (reg_fpq3_a), (reg_fpq3_b)), \
+ _PIPE((pipe), (reg_fpq4_a), (reg_fpq4_b))))
+
+#define _PIPEDMC_FPQ1_HP_A 0x5f128
+#define _PIPEDMC_FPQ2_HP_A 0x5f138
+#define _PIPEDMC_FPQ3_HP_A 0x5f168
+#define _PIPEDMC_FPQ4_HP_A 0x5f174
+#define _PIPEDMC_FPQ5_HP_A 0x5f180
+#define _PIPEDMC_FPQ1_HP_B 0x5f528
+#define _PIPEDMC_FPQ2_HP_B 0x5f538
+#define _PIPEDMC_FPQ3_HP_B 0x5f568
+#define _PIPEDMC_FPQ4_HP_B 0x5f574
+#define _PIPEDMC_FPQ5_HP_B 0x5f580
+#define PIPEDMC_FPQ_HP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_HP_A, _PIPEDMC_FPQ2_HP_A, \
+ _PIPEDMC_FPQ3_HP_A, _PIPEDMC_FPQ4_HP_A, \
+ _PIPEDMC_FPQ1_HP_B, _PIPEDMC_FPQ2_HP_B, \
+ _PIPEDMC_FPQ3_HP_B, _PIPEDMC_FPQ4_HP_B)
+
+#define _PIPEDMC_FPQ1_TP_A 0x5f12c
+#define _PIPEDMC_FPQ2_TP_A 0x5f13c
+#define _PIPEDMC_FPQ3_TP_A 0x5f16c
+#define _PIPEDMC_FPQ4_TP_A 0x5f178
+#define _PIPEDMC_FPQ5_TP_A 0x5f184
+#define _PIPEDMC_FPQ1_TP_B 0x5f52c
+#define _PIPEDMC_FPQ2_TP_B 0x5f53c
+#define _PIPEDMC_FPQ3_TP_B 0x5f56c
+#define _PIPEDMC_FPQ4_TP_B 0x5f578
+#define _PIPEDMC_FPQ5_TP_B 0x5f584
+#define PIPEDMC_FPQ_TP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_TP_A, _PIPEDMC_FPQ2_TP_A, \
+ _PIPEDMC_FPQ3_TP_A, _PIPEDMC_FPQ4_TP_A, \
+ _PIPEDMC_FPQ1_TP_B, _PIPEDMC_FPQ2_TP_B, \
+ _PIPEDMC_FPQ3_TP_B, _PIPEDMC_FPQ4_TP_B)
+
+#define _PIPEDMC_FPQ1_CHP_A 0x5f130
+#define _PIPEDMC_FPQ2_CHP_A 0x5f140
+#define _PIPEDMC_FPQ3_CHP_A 0x5f170
+#define _PIPEDMC_FPQ4_CHP_A 0x5f17c
+#define _PIPEDMC_FPQ5_CHP_A 0x5f188
+#define _PIPEDMC_FPQ1_CHP_B 0x5f530
+#define _PIPEDMC_FPQ2_CHP_B 0x5f540
+#define _PIPEDMC_FPQ3_CHP_B 0x5f570
+#define _PIPEDMC_FPQ4_CHP_B 0x5f57c
+#define _PIPEDMC_FPQ5_CHP_B 0x5f588
+#define PIPEDMC_FPQ_CHP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \
+ _PIPEDMC_FPQ1_CHP_A, _PIPEDMC_FPQ2_CHP_A, \
+ _PIPEDMC_FPQ3_CHP_A, _PIPEDMC_FPQ4_CHP_A, \
+ _PIPEDMC_FPQ1_CHP_B, _PIPEDMC_FPQ2_CHP_B, \
+ _PIPEDMC_FPQ3_CHP_B, _PIPEDMC_FPQ4_CHP_B)
+
+#define _PIPEDMC_FPQ_TS_A 0x5f134
+#define _PIPEDMC_FPQ_TS_B 0x5f534
+#define PIPEDMC_FPQ_TS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_TS_A, _PIPEDMC_FPQ_TS_B)
+
+#define _PIPEDMC_SCANLINE_RO_A 0x5f144
+#define _PIPEDMC_SCANLINE_RO_B 0x5f544
+#define PIPEDMC_SCANLINE_RO(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINE_RO_A, _PIPEDMC_SCANLINE_RO_B)
+
+#define _PIPEDMC_FPQ_CTL1_A 0x5f160
+#define _PIPEDMC_FPQ_CTL1_B 0x5f560
+#define PIPEDMC_FPQ_CTL1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL1_A, _PIPEDMC_FPQ_CTL1_B)
+#define PIPEDMC_SW_DMC_WAKE REG_BIT(0)
+
+#define _PIPEDMC_FPQ_CTL2_A 0x5f164
+#define _PIPEDMC_FPQ_CTL2_B 0x5f564
+#define PIPEDMC_FPQ_CTL2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL2_A, _PIPEDMC_FPQ_CTL2_B)
+#define PIPEDMC_DMC_INT_AT_DELAYED_VBLANK REG_BIT(1)
+#define PIPEDMC_W1_DMC_WAKE REG_BIT(0)
+
+#define _PIPEDMC_INTERRUPT_A 0x5f190 /* lnl+ */
+#define _PIPEDMC_INTERRUPT_B 0x5f590 /* lnl+ */
+#define PIPEDMC_INTERRUPT(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_A, _PIPEDMC_INTERRUPT_B)
+#define _PIPEDMC_INTERRUPT_MASK_A 0x5f194 /* lnl+ */
+#define _PIPEDMC_INTERRUPT_MASK_B 0x5f594 /* lnl+ */
+#define PIPEDMC_INTERRUPT_MASK(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_MASK_A, _PIPEDMC_INTERRUPT_MASK_B)
+#define PIPEDMC_FLIPQ_PROG_DONE REG_BIT(3)
+#define PIPEDMC_ERROR REG_BIT(2)
+#define PIPEDMC_GTT_FAULT REG_BIT(1)
+#define PIPEDMC_ATS_FAULT REG_BIT(0)
#define PIPEDMC_BLOCK_PKGC_SW_A 0x5f1d0
#define PIPEDMC_BLOCK_PKGC_SW_B 0x5F5d0
@@ -71,12 +496,7 @@
#define DMC_EVT_CTL_TYPE_LEVEL_1 1
#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
-
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
-#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
-#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */
-/* An event handler scheduled to run at a 1 kHz frequency. */
-#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
@@ -117,4 +537,51 @@
#define DMC_WAKELOCK_CTL_REQ REG_BIT(31)
#define DMC_WAKELOCK_CTL_ACK REG_BIT(15)
+#define DMC_FQ_W2_PTS_CFG_SEL _MMIO(0x8f240)
+#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(26, 24)
+#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(18, 16)
+#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(10, 8)
+#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(2, 0)
+#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe))
+
+/* plane/general flip queue entries */
+#define PIPEDMC_FQ_RAM(start_mmioaddr, i) _MMIO((start_mmioaddr) + (i) * 4)
+/* LNL */
+/* DW0 pts */
+/* DW1 head */
+/* DW2 size/etc. */
+#define LNL_FQ_INTERRUPT REG_BIT(31)
+#define LNL_FQ_DSB_ID_MASK REG_GENMASK(30, 29)
+#define LNL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(LNL_FQ_DSB_ID_MASK, (dsb_id))
+#define LNL_FQ_EXECUTED REG_BIT(28)
+#define LNL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0)
+#define LNL_FQ_DSB_SIZE(size) REG_FIELD_PREP(LNL_FQ_DSB_SIZE_MASK, (size))
+/* DW3 reserved (plane queues) */
+/* DW3 second DSB head (general queue) */
+/* DW4 second DSB size/etc. (general queue) */
+/* DW5 reserved (general queue) */
+
+/* PTL+ */
+/* DW0 pts */
+/* DW1 reserved */
+/* DW2 size/etc. */
+#define PTL_FQ_INTERRUPT REG_BIT(31)
+#define PTL_FQ_NEED_PUSH REG_BIT(30)
+#define PTL_FQ_BLOCK_PUSH REG_BIT(29)
+#define PTL_FQ_EXECUTED REG_BIT(28)
+#define PTL_FQ_DSB_ID_MASK REG_GENMASK(25, 24)
+#define PTL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(PTL_FQ_DSB_ID_MASK, (dsb_id))
+#define PTL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0)
+#define PTL_FQ_DSB_SIZE(size) REG_FIELD_PREP(PTL_FQ_DSB_SIZE_MASK, (size))
+/* DW3 head */
+/* DW4 second DSB size/etc. (general queue) */
+/* DW5 second DSB head (general queue) */
+
+/* undocumented magic DMC variables */
+#define PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6b8)
+#define PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6c0)
+
#endif /* __INTEL_DMC_REGS_H__ */
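/*
 * The DW0..DW5 comments above spell out the per-entry layout of the LNL and
 * PTL flip queues. As a minimal standalone sketch of how the LNL_FQ_* fields
 * compose into a DW2 word, the snippet below re-implements GENMASK()/
 * FIELD_PREP() locally so it compiles outside the kernel; the entry values
 * (DSB id 1, 0x200 dwords) are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define FQ_INTERRUPT      (1u << 31)       /* mirrors LNL_FQ_INTERRUPT */
#define FQ_DSB_ID_MASK    GENMASK(30, 29)  /* mirrors LNL_FQ_DSB_ID_MASK */
#define FQ_DSB_SIZE_MASK  GENMASK(15, 0)   /* mirrors LNL_FQ_DSB_SIZE_MASK */

int main(void)
{
	/* hypothetical entry: DSB 1, 0x200 dwords, interrupt on execution */
	uint32_t dw2 = FQ_INTERRUPT |
		       FIELD_PREP(FQ_DSB_ID_MASK, 1) |
		       FIELD_PREP(FQ_DSB_SIZE_MASK, 0x200);

	printf("flip queue DW2 = 0x%08x\n", dw2); /* 0xa0000200 */
	return 0;
}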
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 7e2ce0c2f6c3..b3bb89ba34f9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -7,9 +7,8 @@
#include <drm/drm_print.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
@@ -155,12 +154,11 @@ static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
static void __intel_dmc_wl_release(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc_wl *wl = &display->wl;
WARN_ON(refcount_read(&wl->refcount));
- queue_delayed_work(i915->unordered_wq, &wl->work,
+ queue_delayed_work(display->wq.unordered, &wl->work,
msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 724de7ed3c04..7976fec88606 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -27,6 +27,8 @@
#include <linux/export.h>
#include <linux/i2c.h>
+#include <linux/log2.h>
+#include <linux/math.h>
#include <linux/notifier.h>
#include <linux/seq_buf.h>
#include <linux/slab.h>
@@ -34,7 +36,6 @@
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
-
#include <asm/byteorder.h>
#include <drm/display/drm_dp_helper.h>
@@ -49,8 +50,6 @@
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
@@ -64,6 +63,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -847,7 +847,7 @@ small_joiner_ram_size_bits(struct intel_display *display)
return 6144 * 8;
}
-u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
+static u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
{
u32 bits_per_pixel = bpp;
int i;
@@ -939,6 +939,7 @@ static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
return ultrajoiner_ram_bits() / mode_hdisplay;
}
+/* TODO: return a bpp_x16 value */
static
u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
u32 mode_clock, u32 mode_hdisplay,
@@ -955,6 +956,7 @@ u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
return max_bpp;
}
+/* TODO: return a bpp_x16 value */
u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
@@ -1195,7 +1197,7 @@ intel_dp_output_format(struct intel_connector *connector,
int intel_dp_min_bpp(enum intel_output_format output_format)
{
if (output_format == INTEL_OUTPUT_FORMAT_RGB)
- return 6 * 3;
+ return intel_display_min_pipe_bpp();
else
return 8 * 3;
}
@@ -2073,7 +2075,7 @@ int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector
pipe_config, bpc) >> 4;
}
-static int dsc_src_min_compressed_bpp(void)
+int intel_dp_dsc_min_src_compressed_bpp(void)
{
/* Min Compressed bpp supported by source is 8 */
return 8;
@@ -2105,7 +2107,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
/*
* Note: for pre-13 display you still need to check the validity of each step.
*/
-static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
+int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
u8 incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
@@ -2113,12 +2115,19 @@ static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
if (DISPLAY_VER(display) < 14 || !incr)
return fxp_q4_from_int(1);
+ if (connector->mst.dp &&
+ !connector->link.force_bpp_x16 && !connector->mst.dp->force_dsc_fractional_bpp_en)
+ return fxp_q4_from_int(1);
+
/* fxp q4 */
return fxp_q4_from_int(1) / incr;
}
-/* Note: This is not universally usable! */
-static bool intel_dp_dsc_valid_bpp(struct intel_dp *intel_dp, int bpp_x16)
+/*
+ * Note: for bpp_x16 to be valid it must be also within the source/sink's
+ * min..max bpp capability range.
+ */
+bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16)
{
struct intel_display *display = to_intel_display(intel_dp);
int i;
@@ -2156,24 +2165,16 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
const struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int output_bpp;
- int dsc_min_bpp;
- int dsc_max_bpp;
int min_bpp_x16, max_bpp_x16, bpp_step_x16;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int bpp_x16;
int ret;
- dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
-
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
- dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
-
- /* FIXME: remove the round trip via integers */
- min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
- max_bpp_x16 = fxp_q4_from_int(dsc_max_bpp);
+ max_bpp_x16 = min(fxp_q4_from_int(dsc_joiner_max_bpp), limits->link.max_bpp_x16);
bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
@@ -2181,8 +2182,12 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_bpp);
max_bpp_x16 = min(max_bpp_x16, fxp_q4_from_int(output_bpp) - bpp_step_x16);
+ drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
+ min_bpp_x16 = round_up(limits->link.min_bpp_x16, bpp_step_x16);
+ max_bpp_x16 = round_down(max_bpp_x16, bpp_step_x16);
+
for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
- if (!intel_dp_dsc_valid_bpp(intel_dp, bpp_x16))
+ if (!intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16))
continue;
ret = dsc_compute_link_config(intel_dp,
@@ -2485,7 +2490,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
+ dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
@@ -3727,6 +3732,9 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return;
+
if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
intel_dp->pcon_dsc_dpcd,
sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
@@ -5791,6 +5799,28 @@ intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}
+static bool intel_dp_needs_dpcd_probe(struct intel_dp *intel_dp, bool force_on_external)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (intel_dp_is_edp(intel_dp))
+ return false;
+
+ if (force_on_external)
+ return true;
+
+ if (intel_dp->is_mst)
+ return false;
+
+ return drm_edid_has_quirk(&connector->base, DRM_EDID_QUIRK_DP_DPCD_PROBE);
+}
+
+void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external)
+{
+ drm_dp_dpcd_set_probe(&intel_dp->aux,
+ intel_dp_needs_dpcd_probe(intel_dp, force_on_external));
+}
+
static int
intel_dp_detect(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -5919,6 +5949,8 @@ out_unset_edid:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
+ intel_dp_dpcd_set_probe(intel_dp, false);
+
if (!intel_dp_is_edp(intel_dp))
drm_dp_set_subconnector_property(&connector->base,
status,
@@ -5949,6 +5981,8 @@ intel_dp_force(struct drm_connector *_connector)
return;
intel_dp_set_edid(intel_dp);
+
+ intel_dp_dpcd_set_probe(intel_dp, false);
}
static int intel_dp_get_modes(struct drm_connector *_connector)
@@ -6321,10 +6355,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
* complete the DP tunnel BW request for the latter connector/encoder
* waiting for this encoder's DPRX read, perform a dummy read here.
*/
- if (long_hpd)
+ if (long_hpd) {
+ intel_dp_dpcd_set_probe(intel_dp, true);
+
intel_dp_read_dprx_caps(intel_dp, dpcd);
- if (long_hpd) {
intel_dp->reset_link_params = true;
intel_dp_invalidate_source_oui(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 742ae26ac4a9..0657f5681196 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -147,6 +147,7 @@ int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_con
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
int bpc);
+bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes);
@@ -173,8 +174,6 @@ bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state);
-u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp);
-
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -209,7 +208,11 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
int intel_dp_dsc_min_src_input_bpc(void);
+int intel_dp_dsc_min_src_compressed_bpp(void);
int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
+int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
+void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
+
#endif /* __INTEL_DP_H__ */
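/*
 * The dsc_compute_compressed_bpp() rework above keeps the whole compressed
 * bpp search in .4 binary fixed point: both bounds are snapped to the (power
 * of two) step from intel_dp_dsc_bpp_step_x16() with round_up()/round_down(),
 * and the loop walks from max to min. A minimal standalone sketch of that
 * arithmetic, assuming a sink bpp increment of 8 (1/8 bpp steps); the fxp_q4
 * and rounding helpers are re-implemented locally.
 */
#include <assert.h>
#include <stdio.h>

#define Q4_FROM_INT(x)    ((x) << 4)               /* fxp_q4_from_int() */
#define ROUND_UP(x, s)    (((x) + (s) - 1) & ~((s) - 1))
#define ROUND_DOWN(x, s)  ((x) & ~((s) - 1))

int main(void)
{
	int incr = 8;                          /* DPCD: 1/8 bpp increments */
	int step_x16 = Q4_FROM_INT(1) / incr;  /* == 2 */
	int min_x16 = Q4_FROM_INT(10) + 3;     /* 10.1875 bpp, an odd limit */
	int max_x16 = Q4_FROM_INT(12);         /* 12.0 bpp */
	int bpp_x16;

	assert((step_x16 & (step_x16 - 1)) == 0);  /* must be a power of 2 */
	min_x16 = ROUND_UP(min_x16, step_x16);     /* -> 10.25 */
	max_x16 = ROUND_DOWN(max_x16, step_x16);   /* stays 12.0 */

	/* walk from max to min, like the compressed bpp search */
	for (bpp_x16 = max_x16; bpp_x16 >= min_x16; bpp_x16 -= step_x16)
		printf("trying %d.%04d bpp\n",
		       bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
	return 0;
}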
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index bf8e8e0cc19c..829a7c0fbe4f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -835,6 +834,8 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ intel_dp_dpcd_set_probe(intel_dp, true);
}
static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 271b27c9de51..41228478b21c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -145,7 +145,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* ranges for such panels.
*/
if (display->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
- !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ !(connector->base.display_info.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(display->drm,
"[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d.\n",
@@ -475,31 +475,6 @@ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, en
return connector->panel.backlight.level;
}
-static int
-intel_dp_aux_vesa_set_luminance(struct intel_connector *connector, u32 level)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- u8 buf[3];
- int ret;
-
- level = level * 1000;
- level &= 0xffffff;
- buf[0] = (level & 0x0000ff);
- buf[1] = (level & 0x00ff00) >> 8;
- buf[2] = (level & 0xff0000) >> 16;
-
- ret = drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
- buf, sizeof(buf));
- if (ret != sizeof(buf)) {
- drm_err(intel_dp->aux.drm_dev,
- "%s: Failed to set VESA Aux Luminance: %d\n",
- intel_dp->aux.name, ret);
- return -EINVAL;
- } else {
- return 0;
- }
-}
-
static void
intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -507,11 +482,6 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (panel->backlight.edp.vesa.luminance_control_support) {
- if (!intel_dp_aux_vesa_set_luminance(connector, level))
- return;
- }
-
if (!panel->backlight.edp.vesa.info.aux_set) {
const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
@@ -528,18 +498,6 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- int ret;
-
- if (panel->backlight.edp.vesa.luminance_control_support) {
- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
- DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE);
-
- if (ret == 1)
- return;
-
- if (!intel_dp_aux_vesa_set_luminance(connector, level))
- return;
- }
if (!panel->backlight.edp.vesa.info.aux_enable) {
u32 pwm_level;
@@ -580,13 +538,41 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
&connector->base.display_info.luminance_range;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- u16 current_level;
+ u32 current_level;
u8 current_mode;
int ret;
- if (panel->backlight.edp.vesa.luminance_control_support) {
+ ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
+ luminance_range->max_luminance,
+ panel->vbt.backlight.pwm_freq_hz,
+ intel_dp->edp_dpcd, &current_level, &current_mode,
+ false);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
+ if (!panel->backlight.edp.vesa.info.aux_set ||
+ !panel->backlight.edp.vesa.info.aux_enable) {
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
+ return ret;
+ }
+ }
+
+ if (panel->backlight.edp.vesa.info.luminance_set) {
if (luminance_range->max_luminance) {
- panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = luminance_range->min_luminance;
} else {
panel->backlight.max = 512;
@@ -597,54 +583,26 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n",
connector->base.base.id, connector->base.name);
- } else {
- ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
- panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
- &current_level, &current_mode);
- if (ret < 0)
- return ret;
-
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
-
- if (!panel->backlight.edp.vesa.info.aux_set ||
- !panel->backlight.edp.vesa.info.aux_enable) {
- ret = panel->backlight.pwm_funcs->setup(connector, pipe);
- if (ret < 0) {
- drm_err(display->drm,
- "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
- connector->base.base.id, connector->base.name, ret);
- return ret;
- }
+ } else if (panel->backlight.edp.vesa.info.aux_set) {
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
+ panel->backlight.min = 0;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+ panel->backlight.level = current_level;
+ panel->backlight.enabled = panel->backlight.level != 0;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
}
-
- if (panel->backlight.edp.vesa.info.aux_set) {
- panel->backlight.max = panel->backlight.edp.vesa.info.max;
- panel->backlight.min = 0;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
- panel->backlight.level = current_level;
- panel->backlight.enabled = panel->backlight.level != 0;
- } else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
- }
+ } else {
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+ panel->backlight.level =
+ panel->backlight.pwm_funcs->get(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
} else {
- panel->backlight.max = panel->backlight.pwm_level_max;
- panel->backlight.min = panel->backlight.pwm_level_min;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
- panel->backlight.level =
- panel->backlight.pwm_funcs->get(connector, pipe);
- panel->backlight.enabled = panel->backlight.pwm_enabled;
- } else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
- }
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
}
}
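/*
 * After the rework above, intel_dp_aux_vesa_setup_backlight() always calls
 * drm_edp_backlight_init() first (now taking the panel's max luminance) and
 * then picks the control mechanism in priority order. A sketch of that
 * selection, with field names modeled loosely on drm_edp_backlight_info;
 * illustrative only, not the actual structure layout.
 */
#include <stdbool.h>
#include <stdio.h>

struct bl_info {
	bool luminance_set;  /* panel accepts a target luminance in nits */
	bool aux_set;        /* brightness level programmed over DPCD AUX */
};

static const char *pick_backlight_mode(const struct bl_info *info)
{
	if (info->luminance_set)
		return "DPCD luminance (nits)";
	if (info->aux_set)
		return "DPCD brightness level";
	return "PWM";                 /* fall back to pipe PWM controls */
}

int main(void)
{
	struct bl_info info = { .luminance_set = false, .aux_set = true };

	printf("backlight controlled via: %s\n", pick_backlight_mode(&info));
	return 0;
}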
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index cc312596fb77..bd757db85927 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -11,9 +11,9 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -805,10 +805,16 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
enum pipe pipe = (enum pipe)cpu_transcoder;
enum port port = dig_port->base.port;
int ret;
-
- drm_WARN_ON(display->drm, enable &&
- !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port))
- & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+ u32 val;
+ u8 stream_type;
+
+ if (DISPLAY_VER(display) < 30) {
+ val = intel_de_read(display,
+ HDCP2_AUTH_STREAM(display, cpu_transcoder, port));
+ stream_type = REG_FIELD_GET(AUTH_STREAM_TYPE_MASK, val);
+ drm_WARN_ON(display->drm, enable &&
+ stream_type != data->streams[0].stream_type);
+ }
ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
if (ret)
@@ -824,6 +830,14 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return -ETIMEDOUT;
}
+ if (DISPLAY_VER(display) >= 30) {
+ val = intel_de_read(display,
+ HDCP2_STREAM_STATUS(display, cpu_transcoder, port));
+ stream_type = REG_FIELD_GET(STREAM_TYPE_STATUS_MASK, val);
+ drm_WARN_ON(display->drm, enable &&
+ stream_type != data->streams[0].stream_type);
+ }
+
return 0;
}
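/*
 * The hunk above moves the stream type sanity check: pre-xe3 hardware
 * exposes the authenticated type in HDCP2_AUTH_STREAM before enabling
 * encryption, while DISPLAY_VER >= 30 reports it in HDCP2_STREAM_STATUS
 * afterwards. Extracting the field mirrors REG_FIELD_GET(); a standalone
 * sketch with a hypothetical mask position:
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)  (((uint32_t)(v) & (m)) >> __builtin_ctz(m))

#define STREAM_TYPE_STATUS_MASK  GENMASK(5, 4)  /* hypothetical bits */

int main(void)
{
	uint32_t val = 0x10;  /* pretend HDCP2_STREAM_STATUS readout */
	uint8_t stream_type = FIELD_GET(STREAM_TYPE_STATUS_MASK, val);

	printf("stream type %u\n", stream_type);  /* 1 */
	return 0;
}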
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index e1501b13f08f..74497c9a0554 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/log2.h>
+#include <linux/math.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
@@ -30,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -39,6 +41,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -135,6 +138,7 @@ static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
return intel_dp->mst.active_streams++ == 0;
}
+/* TODO: return a bpp_x16 value */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
@@ -241,6 +245,15 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
+static void mst_stream_update_slots(const struct intel_crtc_state *crtc_state,
+ struct drm_dp_mst_topology_state *topology_state)
+{
+ u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
+ DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
+
+ drm_dp_mst_update_slots(topology_state, link_coding_cap);
+}
+
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
@@ -263,6 +276,12 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
fxp_q4_to_frac(max_bpp_x16) ||
fxp_q4_to_frac(bpp_step_x16)));
+ if (!bpp_step_x16) {
+ /* Allow a zero step only to indicate a single try at a given bpp. */
+ drm_WARN_ON(display->drm, min_bpp_x16 != max_bpp_x16);
+ bpp_step_x16 = 1;
+ }
+
if (is_mst) {
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
if (IS_ERR(mst_state))
@@ -270,6 +289,8 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
crtc_state->lane_count);
+
+ mst_stream_update_slots(crtc_state, mst_state);
}
if (dsc) {
@@ -298,12 +319,20 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
}
+ drm_WARN_ON(display->drm, min_bpp_x16 % bpp_step_x16 || max_bpp_x16 % bpp_step_x16);
+
for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
int local_bw_overhead;
int link_bpp_x16;
drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));
+ if (dsc && !intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16)) {
+ /* SST must have already validated the single bpp tried here. */
+ drm_WARN_ON(display->drm, !is_mst);
+ continue;
+ }
+
link_bpp_x16 = dsc ? bpp_x16 :
fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
fxp_q4_to_int(bpp_x16)));
@@ -367,6 +396,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
connector->mst.port,
dfixed_trunc(pbn));
+
+ /* TODO: Do this check in drm_dp_atomic_find_time_slots() itself. */
+ if (slots > mst_state->total_avail_slots)
+ slots = -EINVAL;
} else {
/* Same as above for remote_tu */
crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
@@ -386,10 +419,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
-
- /* Allow using zero step to indicate one try */
- if (!bpp_step_x16)
- break;
}
if (slots < 0) {
@@ -437,7 +466,8 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
int num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
- int min_compressed_bpp, max_compressed_bpp;
+ int min_compressed_bpp_x16, max_compressed_bpp_x16;
+ int bpp_step_x16;
max_bpp = limits->pipe.max_bpp;
min_bpp = limits->pipe.min_bpp;
@@ -462,46 +492,28 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
crtc_state->pipe_bpp = max_bpp;
- max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
- min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
+ min_compressed_bpp_x16 = limits->link.min_bpp_x16;
+ max_compressed_bpp_x16 = limits->link.max_bpp_x16;
- drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
- min_compressed_bpp, max_compressed_bpp);
+ drm_dbg_kms(display->drm,
+ "DSC Sink supported compressed min bpp " FXP_Q4_FMT " compressed max bpp " FXP_Q4_FMT "\n",
+ FXP_Q4_ARGS(min_compressed_bpp_x16), FXP_Q4_ARGS(max_compressed_bpp_x16));
- /* Align compressed bpps according to our own constraints */
- max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
- crtc_state->pipe_bpp);
- min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
- crtc_state->pipe_bpp);
+ bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
+
+ max_compressed_bpp_x16 = min(max_compressed_bpp_x16, fxp_q4_from_int(crtc_state->pipe_bpp) - bpp_step_x16);
+
+ drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
+ min_compressed_bpp_x16 = round_up(min_compressed_bpp_x16, bpp_step_x16);
+ max_compressed_bpp_x16 = round_down(max_compressed_bpp_x16, bpp_step_x16);
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
- fxp_q4_from_int(min_compressed_bpp),
- fxp_q4_from_int(max_compressed_bpp),
- fxp_q4_from_int(1), true);
-}
-
-static int mst_stream_update_slots(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
- struct drm_dp_mst_topology_state *topology_state;
- u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
- DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
-
- topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
- if (IS_ERR(topology_state)) {
- drm_dbg_kms(display->drm, "slot update failed\n");
- return PTR_ERR(topology_state);
- }
-
- drm_dp_mst_update_slots(topology_state, link_coding_cap);
-
- return 0;
+ min_compressed_bpp_x16,
+ max_compressed_bpp_x16,
+ bpp_step_x16, true);
}
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
@@ -709,10 +721,6 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
- if (ret)
- return ret;
-
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
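/*
 * intel_dp_mtp_tu_compute_config() above now owns the "zero step" special
 * case: a step of 0 is only legal when min == max and means a single
 * attempt at exactly that bpp. A standalone sketch of the loop convention:
 */
#include <assert.h>
#include <stdio.h>

static void search_bpp_x16(int min_x16, int max_x16, int step_x16)
{
	int bpp_x16;

	if (!step_x16) {
		assert(min_x16 == max_x16);  /* single-try convention */
		step_x16 = 1;
	}

	for (bpp_x16 = max_x16; bpp_x16 >= min_x16; bpp_x16 -= step_x16) {
		printf("trying %d/16 bpp\n", bpp_x16);
		/* a real caller would attempt the link config here and
		 * break out on success */
	}
}

int main(void)
{
	search_bpp_x16(288, 288, 0);  /* 18.0 bpp, tried exactly once */
	return 0;
}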
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index bd61f3c3ec91..6ed5012c5fac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -10,9 +10,9 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 69f242139420..3f77ad92c156 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,13 +21,15 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -426,7 +428,7 @@ static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
* use 1ms due to occasional timeouts observed with that.
*/
if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
- PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
+ PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1, NULL))
drm_err(display->drm, "timeout during PHY%d power on\n",
phy);
@@ -715,53 +717,53 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
int i;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Clear calc init */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW10(ch), val);
}
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW9(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= DPIO_SWING_MARGIN000(margin_reg_value);
@@ -774,7 +776,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW2(ch, i), val);
}
/*
@@ -784,70 +786,70 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* 27 for ch0 and ch1.
*/
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
+ val = vlv_dpio_read(display->drm, phy, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW10(ch), val);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool reset)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW0(ch), val);
}
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW1(ch), val);
}
}
@@ -855,11 +857,11 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool reset)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- vlv_dpio_get(i915);
+ vlv_dpio_get(display->drm);
__chv_data_lane_soft_reset(encoder, crtc_state, reset);
- vlv_dpio_put(i915);
+ vlv_dpio_put(display->drm);
}
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
@@ -867,7 +869,6 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
@@ -886,47 +887,47 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_powergate_lanes(encoder, true, lane_mask);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Assert data lane reset */
__chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW8(ch));
val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
else
val &= ~DPIO_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW8(ch));
val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
else
val &= ~DPIO_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW8(ch), val);
}
/*
@@ -934,38 +935,38 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW19(ch));
if (pipe == PIPE_B)
val |= CHV_CMN_USEDCLKCHANNEL;
else
val &= ~CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW19(ch), val);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
int data, i, stagger;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting */
@@ -975,7 +976,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
data = 0;
else
data = (i == 1) ? 0 : DPIO_UPAR;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
+ vlv_dpio_write(display->drm, phy, CHV_TX_DW14(ch, i), data);
}
/* Data lane stagger programming */
@@ -990,17 +991,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
else
stagger = 0x2;
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(display->drm, phy, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW11(ch), val);
}
- vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -1008,7 +1009,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
- vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -1019,7 +1020,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
/* Deassert data lane reset */
__chv_data_lane_soft_reset(encoder, crtc_state, false);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
@@ -1036,25 +1037,25 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW1_CH1, val);
}
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
/*
* Leave the power down bit cleared for at least one
@@ -1073,97 +1074,97 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
- uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW2_GRP(ch),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW4(ch, 3), tx3_demph);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
/* Program Tx lane resets to default */
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW0_GRP(ch),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW1_GRP(ch),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
DPIO_PCS_CLK_DATAWIDTH_8_10 |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
+ vlv_dpio_write(display->drm, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
u32 val;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable clock channels for this port */
val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe == PIPE_B)
val |= DPIO_PCS_USEDCLKCHANNEL;
val |= 0xc4;
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW8_GRP(ch), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
- vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
+ vlv_dpio_put(display->drm);
}
void vlv_wait_port_ready(struct intel_encoder *encoder,
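/*
 * The intel_dpio_phy.c conversion above is mechanical: every sideband
 * accessor now takes display->drm instead of the i915 private pointer, but
 * the get / read-modify-write / put pattern is unchanged. A standalone
 * sketch of that pattern; the helpers here are stand-ins for the real
 * vlv_dpio_*() sideband routines, and the bit position is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];
static void dpio_get(void) { /* would take the sideband lock */ }
static void dpio_put(void) { /* would drop the sideband lock */ }
static uint32_t dpio_read(int r)          { return regs[r]; }
static void dpio_write(int r, uint32_t v) { regs[r] = v; }

#define PCS_DW11                    11
#define DPIO_LANEDESKEW_STRAP_OVRD  (1u << 3)  /* hypothetical bit */

int main(void)
{
	uint32_t val;

	dpio_get();                    /* serialize PHY access */
	val = dpio_read(PCS_DW11);
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;  /* hw manages TX FIFO reset */
	dpio_write(PCS_DW11, val);
	dpio_put();

	printf("PCS_DW11 = 0x%08x\n", regs[PCS_DW11]);
	return 0;
}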
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index a9e9b98d0bf9..f969c5399a51 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -6,13 +6,14 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -24,11 +25,11 @@
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
-struct intel_dpll_funcs {
+struct intel_dpll_global_funcs {
int (*crtc_compute_clock)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
- int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+ int (*crtc_get_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
};
struct intel_limit {
@@ -513,8 +514,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -526,9 +527,9 @@ void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
- vlv_dpio_get(dev_priv);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
+ vlv_dpio_put(display->drm);
clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
@@ -541,8 +542,8 @@ void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -554,13 +555,13 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
- vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
- pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
- pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
- pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
- pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
- vlv_dpio_put(dev_priv);
+ vlv_dpio_get(display->drm);
+ cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
+ pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
+ pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
+ pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
+ pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
+ vlv_dpio_put(display->drm);
clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
@@ -1161,7 +1162,7 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- ret = intel_compute_shared_dplls(state, crtc, encoder);
+ ret = intel_dpll_compute(state, crtc, encoder);
if (ret)
return ret;
@@ -1176,8 +1177,8 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
@@ -1189,7 +1190,7 @@ static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- return intel_reserve_shared_dplls(state, crtc, encoder);
+ return intel_dpll_reserve(state, crtc, encoder);
}
static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1223,7 +1224,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
if (ret)
return ret;
- /* TODO: Do the readback via intel_compute_shared_dplls() */
+ /* TODO: Do the readback via intel_dpll_compute() */
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -1394,7 +1395,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- ret = intel_compute_shared_dplls(state, crtc, NULL);
+ ret = intel_dpll_compute(state, crtc, NULL);
if (ret)
return ret;
@@ -1404,8 +1405,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
}
-static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1414,7 +1415,7 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
if (!crtc_state->has_pch_encoder)
return 0;
- return intel_reserve_shared_dplls(state, crtc, NULL);
+ return intel_dpll_reserve(state, crtc, NULL);
}
static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
@@ -1690,45 +1691,45 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-static const struct intel_dpll_funcs mtl_dpll_funcs = {
+static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
};
-static const struct intel_dpll_funcs dg2_dpll_funcs = {
+static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
.crtc_compute_clock = dg2_crtc_compute_clock,
};
-static const struct intel_dpll_funcs hsw_dpll_funcs = {
+static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
.crtc_compute_clock = hsw_crtc_compute_clock,
- .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
+ .crtc_get_dpll = hsw_crtc_get_dpll,
};
-static const struct intel_dpll_funcs ilk_dpll_funcs = {
+static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
.crtc_compute_clock = ilk_crtc_compute_clock,
- .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
+ .crtc_get_dpll = ilk_crtc_get_dpll,
};
-static const struct intel_dpll_funcs chv_dpll_funcs = {
+static const struct intel_dpll_global_funcs chv_dpll_funcs = {
.crtc_compute_clock = chv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs vlv_dpll_funcs = {
+static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
.crtc_compute_clock = vlv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs g4x_dpll_funcs = {
+static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
.crtc_compute_clock = g4x_crtc_compute_clock,
};
-static const struct intel_dpll_funcs pnv_dpll_funcs = {
+static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
.crtc_compute_clock = pnv_crtc_compute_clock,
};
-static const struct intel_dpll_funcs i9xx_dpll_funcs = {
+static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
.crtc_compute_clock = i9xx_crtc_compute_clock,
};
-static const struct intel_dpll_funcs i8xx_dpll_funcs = {
+static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
@@ -1758,8 +1759,8 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
-int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
@@ -1767,15 +1768,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
int ret;
drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
- if (!crtc_state->hw.enable || crtc_state->shared_dpll)
+ if (!crtc_state->hw.enable || crtc_state->intel_dpll)
return 0;
- if (!display->funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_dpll)
return 0;
- ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
if (ret) {
drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
@@ -1871,45 +1872,43 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
tmp &= 0xffffff00;
tmp |= 0x00000030;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
tmp &= 0x00ffffff;
tmp |= 0x8c000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+ vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
tmp &= 0xffffff00;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
- tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
tmp &= 0x00ffffff;
tmp |= 0xb0000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+ vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 tmp, coreclk;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -1918,15 +1917,15 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
+ vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
+ tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
tmp &= 0x00ffffff;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
+ vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
tmp = DPIO_M1_DIV(clock->m1) |
@@ -1942,48 +1941,42 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
* Note: don't use the DAC post divider as it seems unstable.
*/
tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
tmp |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
- 0x009f0003);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
- 0x00d0000f);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df40000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df70000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df70000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
- 0x0df40000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
+ coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
+ vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
@@ -2028,8 +2021,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
@@ -2038,44 +2031,44 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
m2_frac = clock->m2 & 0x3fffff;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
DPIO_CHV_S1_DIV(5) |
DPIO_CHV_P1_DIV(clock->p1) |
DPIO_CHV_P2_DIV(clock->p2) |
DPIO_CHV_K_DIV(1));
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
DPIO_CHV_M2_DIV(clock->m2 >> 22));
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
DPIO_CHV_N_DIV(1));
/* M2 fraction division */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
DPIO_CHV_M2_FRAC_DIV(m2_frac));
/* M2 fraction division enable */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
if (m2_frac)
tmp |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);
/* Program digital lock detect threshold */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
if (!m2_frac)
tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);
/* Loop filter */
if (clock->vco == 5400000) {
@@ -2100,40 +2093,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);
- tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);
/* AFC Recal */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
- vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
+ vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
DPIO_AFC_RECAL);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
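As a side note on the arithmetic above: chv_prepare_pll() treats clock->m2 as a fixed-point value, programming the integer part (clock->m2 >> 22) through DPIO_CHV_M2_DIV() and keeping the low 22 bits as the fraction, with DPIO_CHV_FRAC_DIV_EN set only when that fraction is non-zero. A minimal standalone sketch of the split (plain userspace C; the numbers are invented for the demo):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented example value: 81 + 0x19999a/2^22, i.e. roughly 81.4 */
	uint32_t m2 = (81u << 22) | 0x19999a;

	uint32_t m2_int  = m2 >> 22;        /* operand of DPIO_CHV_M2_DIV() */
	uint32_t m2_frac = m2 & 0x3fffff;   /* 22-bit fraction; gates DPIO_CHV_FRAC_DIV_EN */

	printf("m2 = %u + %u/%u (frac divider %s)\n",
	       m2_int, m2_frac, 1u << 22,
	       m2_frac ? "enabled" : "bypassed");
	return 0;
}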
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 tmp;
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
+ tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
@@ -2252,7 +2244,6 @@ void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2268,14 +2259,14 @@ void chv_disable_pll(struct intel_display *display, enum pipe pipe)
intel_de_write(display, DPLL(display, pipe), val);
intel_de_posting_read(display, DPLL(display, pipe));
- vlv_dpio_get(dev_priv);
+ vlv_dpio_get(display->drm);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
+ val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
+ vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);
- vlv_dpio_put(dev_priv);
+ vlv_dpio_put(display->drm);
}
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 280e90a57c87..3444a2dd3166 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -19,8 +19,8 @@ struct intel_dpll_hw_state;
void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 9da051a3f455..33e0398120c8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -27,10 +27,10 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -52,34 +52,34 @@
* share a PLL if their configurations match.
*
* This file provides an abstraction over display PLLs. The function
- * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * intel_dpll_init() initializes the PLLs for the given platform. The
* users of a PLL are tracked and that tracking is integrated with the atomic
* modeset interface. During an atomic operation, required PLLs can be reserved
* for a given CRTC and encoder configuration by calling
- * intel_reserve_shared_dplls() and previously reserved PLLs can be released
- * with intel_release_shared_dplls().
+ * intel_dpll_reserve() and previously reserved PLLs can be released
+ * with intel_dpll_release().
* Changes to the users are first staged in the atomic state, and then made
- * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * effective by calling intel_dpll_swap_state() during the atomic
* commit phase.
*/
/* platform specific hooks for managing DPLLs */
-struct intel_shared_dpll_funcs {
+struct intel_dpll_funcs {
/*
- * Hook for enabling the pll, called from intel_enable_shared_dpll() if
+ * Hook for enabling the pll, called from intel_dpll_enable() if
* the pll is not already enabled.
*/
void (*enable)(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
/*
- * Hook for disabling the pll, called from intel_disable_shared_dpll()
+ * Hook for disabling the pll, called from intel_dpll_disable()
* only when it is safe to disable the pll, i.e., there are no more
* tracked users for it.
*/
void (*disable)(struct intel_display *display,
- struct intel_shared_dpll *pll);
+ struct intel_dpll *pll);
/*
* Hook for reading the values currently programmed to the DPLL
@@ -87,7 +87,7 @@ struct intel_shared_dpll_funcs {
* verification after a mode set.
*/
bool (*get_hw_state)(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
/*
@@ -95,7 +95,7 @@ struct intel_shared_dpll_funcs {
* in state.
*/
int (*get_freq)(struct intel_display *i915,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
};
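The struct above is the usual per-platform vtable pattern: each PLL type supplies its own enable/disable/get_hw_state/get_freq hooks, and generic code only ever calls through pll->info->funcs. Reduced to its essentials the pattern looks roughly like this (a standalone sketch; every name below is invented, not the driver's API):

#include <stdio.h>

struct pll;

/* stand-in for the hook table: one instance per PLL type */
struct pll_funcs {
	void (*enable)(struct pll *pll);
	void (*disable)(struct pll *pll);
	int  (*get_freq)(const struct pll *pll);
};

struct pll {
	const char *name;
	const struct pll_funcs *funcs;  /* picked once, at init, per platform */
};

static void toy_enable(struct pll *pll)         { printf("%s: enable\n", pll->name); }
static void toy_disable(struct pll *pll)        { printf("%s: disable\n", pll->name); }
static int  toy_get_freq(const struct pll *pll) { (void)pll; return 540000; }

static const struct pll_funcs toy_funcs = {
	.enable   = toy_enable,
	.disable  = toy_disable,
	.get_freq = toy_get_freq,
};

int main(void)
{
	struct pll pll = { .name = "TOYPLL", .funcs = &toy_funcs };

	pll.funcs->enable(&pll);   /* generic code calls through the table only */
	printf("freq: %d kHz\n", pll.funcs->get_freq(&pll));
	pll.funcs->disable(&pll);
	return 0;
}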
@@ -122,18 +122,18 @@ struct intel_dpll_mgr {
static void
intel_atomic_duplicate_dpll_state(struct intel_display *display,
- struct intel_shared_dpll_state *shared_dpll)
+ struct intel_dpll_state *dpll_state)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- /* Copy shared dpll state */
- for_each_shared_dpll(display, pll, i)
- shared_dpll[pll->index] = pll->state;
+ /* Copy dpll state */
+ for_each_dpll(display, pll, i)
+ dpll_state[pll->index] = pll->state;
}
-static struct intel_shared_dpll_state *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+static struct intel_dpll_state *
+intel_atomic_get_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
struct intel_display *display = to_intel_display(state);
@@ -144,28 +144,28 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
state->dpll_set = true;
intel_atomic_duplicate_dpll_state(display,
- state->shared_dpll);
+ state->dpll_state);
}
- return state->shared_dpll;
+ return state->dpll_state;
}
/**
- * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * intel_get_dpll_by_id - get a DPLL given its id
* @display: intel_display device instance
* @id: pll id
*
* Returns:
* A pointer to the DPLL with @id
*/
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct intel_display *display,
- enum intel_dpll_id id)
+struct intel_dpll *
+intel_get_dpll_by_id(struct intel_display *display,
+ enum intel_dpll_id id)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
if (pll->info->id == id)
return pll;
}
@@ -175,9 +175,9 @@ intel_get_shared_dpll_by_id(struct intel_display *display,
}
/* For ILK+ */
-void assert_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll,
- bool state)
+void assert_dpll(struct intel_display *display,
+ struct intel_dpll *pll,
+ bool state)
{
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -205,7 +205,7 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (display->platform.dg1)
return DG1_DPLL_ENABLE(pll->info->id);
@@ -218,7 +218,7 @@ intel_combo_pll_enable_reg(struct intel_display *display,
static i915_reg_t
intel_tc_pll_enable_reg(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
@@ -230,7 +230,7 @@ intel_tc_pll_enable_reg(struct intel_display *display,
}
static void _intel_enable_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
@@ -240,7 +240,7 @@ static void _intel_enable_shared_dpll(struct intel_display *display,
}
static void _intel_disable_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
pll->info->funcs->disable(display, pll);
pll->on = false;
@@ -250,16 +250,16 @@ static void _intel_disable_shared_dpll(struct intel_display *display,
}
/**
- * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc_state: CRTC, and its state, which has a shared DPLL
+ * intel_dpll_enable - enable a CRTC's DPLL
+ * @crtc_state: CRTC, and its state, which has a DPLL
*
- * Enable the shared DPLL used by @crtc.
+ * Enable the DPLL used by @crtc.
*/
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
+void intel_dpll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
@@ -282,7 +282,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (old_mask) {
drm_WARN_ON(display->drm, !pll->on);
- assert_shared_dpll_enabled(display, pll);
+ assert_dpll_enabled(display, pll);
goto out;
}
drm_WARN_ON(display->drm, pll->on);
@@ -296,16 +296,16 @@ out:
}
/**
- * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * intel_dpll_disable - disable a CRTC's DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
- * Disable the shared DPLL used by @crtc.
+ * Disable the DPLL used by @crtc.
*/
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
+void intel_dpll_disable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ struct intel_dpll *pll = crtc_state->intel_dpll;
unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
@@ -326,7 +326,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id, crtc->base.name);
- assert_shared_dpll_enabled(display, pll);
+ assert_dpll_enabled(display, pll);
drm_WARN_ON(display->drm, !pll->on);
pll->active_mask &= ~pipe_mask;
@@ -344,11 +344,11 @@ out:
static unsigned long
intel_dpll_mask_all(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
unsigned long dpll_mask = 0;
int i;
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
dpll_mask |= BIT(pll->info->id);
@@ -357,44 +357,44 @@ intel_dpll_mask_all(struct intel_display *display)
return dpll_mask;
}
-static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_dpll_hw_state *dpll_hw_state,
- unsigned long dpll_mask)
+static struct intel_dpll *
+intel_find_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *dpll_hw_state,
+ unsigned long dpll_mask)
{
struct intel_display *display = to_intel_display(crtc);
unsigned long dpll_mask_all = intel_dpll_mask_all(display);
- struct intel_shared_dpll_state *shared_dpll;
- struct intel_shared_dpll *unused_pll = NULL;
+ struct intel_dpll_state *dpll_state;
+ struct intel_dpll *unused_pll = NULL;
enum intel_dpll_id id;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);
for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
if (!pll)
continue;
/* Only want to check enabled timings first */
- if (shared_dpll[pll->index].pipe_mask == 0) {
+ if (dpll_state[pll->index].pipe_mask == 0) {
if (!unused_pll)
unused_pll = pll;
continue;
}
if (memcmp(dpll_hw_state,
- &shared_dpll[pll->index].hw_state,
+ &dpll_state[pll->index].hw_state,
sizeof(*dpll_hw_state)) == 0) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
- shared_dpll[pll->index].pipe_mask,
+ dpll_state[pll->index].pipe_mask,
pll->active_mask);
return pll;
}
@@ -412,76 +412,76 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
}
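The sharing test in intel_find_dpll() above is deliberately strict: a PLL already staged for another pipe is reused only when the requested hardware state is bit-for-bit identical, otherwise the first unused PLL is taken instead. A toy version of that comparison (stand-in types, illustration only):

#include <stdio.h>
#include <string.h>

struct hw_state { unsigned int ctrl, div; };  /* stand-in for a dpll hw state */

int main(void)
{
	struct hw_state staged = { .ctrl = 0x80000018, .div = 42 };  /* another pipe's config */
	struct hw_state wanted = { .ctrl = 0x80000018, .div = 42 };  /* this CRTC's request */

	if (memcmp(&wanted, &staged, sizeof(wanted)) == 0)
		printf("sharing the existing PLL\n");
	else
		printf("falling back to an unused PLL\n");
	return 0;
}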
/**
- * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
+ * intel_dpll_crtc_get - Get a DPLL reference for a CRTC
* @crtc: CRTC on which behalf the reference is taken
* @pll: DPLL for which the reference is taken
- * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
+ * @dpll_state: the DPLL atomic state in which the reference is tracked
*
* Take a reference for @pll tracking the use of it by @crtc.
*/
static void
-intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state)
+intel_dpll_crtc_get(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *dpll_state)
{
struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
+ drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
- shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
+ dpll_state->pipe_mask |= BIT(crtc->pipe);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
static void
-intel_reference_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *dpll_hw_state)
+intel_reference_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- struct intel_shared_dpll_state *shared_dpll;
+ struct intel_dpll_state *dpll_state;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
- if (shared_dpll[pll->index].pipe_mask == 0)
- shared_dpll[pll->index].hw_state = *dpll_hw_state;
+ if (dpll_state[pll->index].pipe_mask == 0)
+ dpll_state[pll->index].hw_state = *dpll_hw_state;
- intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
+ intel_dpll_crtc_get(crtc, pll, &dpll_state[pll->index]);
}
/**
- * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
+ * intel_dpll_crtc_put - Drop a DPLL reference for a CRTC
* @crtc: CRTC on which behalf the reference is dropped
* @pll: DPLL for which the reference is dropped
- * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
+ * @dpll_state: the DPLL atomic state in which the reference is tracked
*
* Drop a reference for @pll tracking the end of use of it by @crtc.
*/
void
-intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state)
+intel_dpll_crtc_put(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *dpll_state)
{
struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
+ drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
- shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
+ dpll_state->pipe_mask &= ~BIT(crtc->pipe);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
-static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
- const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll)
+static void intel_unreference_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll *pll)
{
- struct intel_shared_dpll_state *shared_dpll;
+ struct intel_dpll_state *dpll_state;
- shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ dpll_state = intel_atomic_get_dpll_state(&state->base);
- intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
+ intel_dpll_crtc_put(crtc, pll, &dpll_state[pll->index]);
}
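intel_dpll_crtc_get() and intel_dpll_crtc_put() above do their reference tracking with a per-PLL pipe bitmask rather than a plain counter, so the owner of each reference stays visible in the mask and in debug output. A toy model of the bookkeeping (standalone, invented names; __builtin_popcount assumes gcc/clang):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int pipe_mask = 0;

	pipe_mask |= BIT(0);   /* "crtc_get": pipe A takes a reference */
	pipe_mask |= BIT(1);   /* pipe B shares the same PLL */
	printf("users: %d (mask 0x%x)\n", __builtin_popcount(pipe_mask), pipe_mask);

	pipe_mask &= ~BIT(0);  /* "crtc_put": pipe A releases its reference */
	if (!pipe_mask)
		printf("mask empty, PLL may be disabled\n");
	else
		printf("still in use (mask 0x%x)\n", pipe_mask);
	return 0;
}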
static void intel_put_dpll(struct intel_atomic_state *state,
@@ -492,16 +492,16 @@ static void intel_put_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- new_crtc_state->shared_dpll = NULL;
+ new_crtc_state->intel_dpll = NULL;
- if (!old_crtc_state->shared_dpll)
+ if (!old_crtc_state->intel_dpll)
return;
- intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+ intel_unreference_dpll(state, crtc, old_crtc_state->intel_dpll);
}
/**
- * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * intel_dpll_swap_state - make atomic DPLL configuration effective
* @state: atomic state
*
* This is the dpll version of drm_atomic_helper_swap_state() since the
@@ -511,22 +511,22 @@ static void intel_put_dpll(struct intel_atomic_state *state,
* i.e. it also puts the current state into @state, even though there is no
* need for that at this moment.
*/
-void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
+void intel_dpll_swap_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
- struct intel_shared_dpll *pll;
+ struct intel_dpll_state *dpll_state = state->dpll_state;
+ struct intel_dpll *pll;
int i;
if (!state->dpll_set)
return;
- for_each_shared_dpll(display, pll, i)
- swap(pll->state, shared_dpll[pll->index]);
+ for_each_dpll(display, pll, i)
+ swap(pll->state, dpll_state[pll->index]);
}
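As the kernel-doc above notes, intel_dpll_swap_state() swaps rather than copies, so after commit the staged state becomes live while the previous state lands back in the atomic state object. A minimal standalone illustration of why a swap is used (stand-in types only):

#include <stdio.h>

struct pll_state { unsigned int pipe_mask; };

/* local stand-in for the kernel's swap() macro */
#define swap(a, b) do { struct pll_state tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
	struct pll_state live   = { .pipe_mask = 0x1 };  /* what the PLL tracks now */
	struct pll_state staged = { .pipe_mask = 0x3 };  /* computed during check */

	swap(live, staged);  /* commit: the staged state becomes live... */

	/* ...and the old state remains available, e.g. for inspection/rollback */
	printf("live=0x%x old=0x%x\n", live.pipe_mask, staged.pipe_mask);
	return 0;
}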
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
@@ -562,7 +562,7 @@ static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
}
static void ibx_pch_dpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
@@ -591,7 +591,7 @@ static void ibx_pch_dpll_enable(struct intel_display *display,
}
static void ibx_pch_dpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -614,33 +614,33 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id id;
if (HAS_PCH_IBX(display)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_PCH_PLL_B) |
- BIT(DPLL_ID_PCH_PLL_A));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
return -EINVAL;
/* reference the pll */
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -670,7 +670,7 @@ static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->fp1 == b->fp1;
}
-static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+static const struct intel_dpll_funcs ibx_pch_dpll_funcs = {
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -692,7 +692,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -704,7 +704,7 @@ static void hsw_ddi_wrpll_enable(struct intel_display *display,
}
static void hsw_ddi_spll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -715,7 +715,7 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
}
static void hsw_ddi_wrpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -731,7 +731,7 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
}
static void hsw_ddi_spll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
enum intel_dpll_id id = pll->info->id;
@@ -747,7 +747,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -769,7 +769,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
}
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -996,7 +996,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -1059,14 +1059,14 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
return 0;
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- return intel_find_shared_dpll(state, crtc,
+ return intel_find_dpll(state, crtc,
&crtc_state->dpll_hw_state,
BIT(DPLL_ID_WRPLL2) |
BIT(DPLL_ID_WRPLL1));
@@ -1090,11 +1090,11 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
}
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -1113,7 +1113,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
return NULL;
}
- pll = intel_get_shared_dpll_by_id(display, pll_id);
+ pll = intel_get_dpll_by_id(display, pll_id);
if (!pll)
return NULL;
@@ -1122,7 +1122,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
}
static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
int link_clock = 0;
@@ -1162,19 +1162,19 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
return 0;
}
-static struct intel_shared_dpll *
+static struct intel_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ return intel_find_dpll(state, crtc, &crtc_state->dpll_hw_state,
BIT(DPLL_ID_SPLL));
}
static int hsw_ddi_spll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
@@ -1221,7 +1221,7 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll = NULL;
+ struct intel_dpll *pll = NULL;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
pll = hsw_ddi_wrpll_get_dpll(state, crtc);
@@ -1233,10 +1233,10 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
if (!pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -1270,14 +1270,14 @@ static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->spll == b->spll;
}
-static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
.get_freq = hsw_ddi_wrpll_get_freq,
};
-static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
@@ -1285,24 +1285,24 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
};
static void hsw_ddi_lcpll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *hw_state)
{
}
static void hsw_ddi_lcpll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
}
static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return true;
}
-static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+static const struct intel_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
@@ -1364,7 +1364,7 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
};
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
@@ -1378,7 +1378,7 @@ static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
}
static void skl_ddi_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1400,7 +1400,7 @@ static void skl_ddi_pll_enable(struct intel_display *display,
}
static void skl_ddi_dpll0_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1409,7 +1409,7 @@ static void skl_ddi_dpll0_enable(struct intel_display *display,
}
static void skl_ddi_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
@@ -1420,12 +1420,12 @@ static void skl_ddi_pll_disable(struct intel_display *display,
}
static void skl_ddi_dpll0_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
}
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1463,7 +1463,7 @@ out:
}
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1736,7 +1736,7 @@ skip_remaining_dividers:
}
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1884,7 +1884,7 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
}
static int skl_ddi_lcpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -1939,31 +1939,31 @@ static int skl_get_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SKL_DPLL0));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL0));
else
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SKL_DPLL3) |
- BIT(DPLL_ID_SKL_DPLL2) |
- BIT(DPLL_ID_SKL_DPLL1));
+ pll = intel_find_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
static int skl_ddi_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
@@ -2004,14 +2004,14 @@ static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->cfgcr2 == b->cfgcr2;
}
-static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+static const struct intel_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
.get_freq = skl_ddi_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+static const struct intel_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
@@ -2038,7 +2038,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static void bxt_ddi_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2141,7 +2141,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
}
static void bxt_ddi_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
@@ -2160,7 +2160,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
}
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2360,7 +2360,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
}
static int bxt_ddi_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
@@ -2429,20 +2429,20 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum intel_dpll_id id;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(display, id);
+ pll = intel_get_dpll_by_id(display, id);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
- intel_reference_shared_dpll(state, crtc,
- pll, &crtc_state->dpll_hw_state);
+ intel_reference_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- crtc_state->shared_dpll = pll;
+ crtc_state->intel_dpll = pll;
return 0;
}
@@ -2486,7 +2486,7 @@ static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->pcsdw12 == b->pcsdw12;
}
-static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+static const struct intel_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
@@ -2755,7 +2755,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
@@ -2826,7 +2826,7 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3199,7 +3199,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3285,7 +3285,7 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[port_dpll_id];
- crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->intel_dpll = port_dpll->pll;
crtc_state->dpll_hw_state = port_dpll->hw_state;
}
@@ -3388,14 +3388,14 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
/* Eliminate DPLLs from consideration if reserved by HTI */
dpll_mask &= ~intel_hti_dpll_mask(display);
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- dpll_mask);
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_mask);
if (!port_dpll->pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
@@ -3428,8 +3428,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (old_crtc_state->intel_dpll &&
+ old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
else
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
@@ -3452,26 +3452,25 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- BIT(DPLL_ID_ICL_TBTPLL));
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll)
return -EINVAL;
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
-
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
- port_dpll->pll = intel_find_shared_dpll(state, crtc,
- &port_dpll->hw_state,
- BIT(dpll_id));
+ port_dpll->pll = intel_find_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(dpll_id));
if (!port_dpll->pll) {
ret = -EINVAL;
goto err_unreference_tbt_pll;
}
- intel_reference_shared_dpll(state, crtc,
- port_dpll->pll, &port_dpll->hw_state);
+ intel_reference_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
@@ -3479,7 +3478,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
err_unreference_tbt_pll:
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+ intel_unreference_dpll(state, crtc, port_dpll->pll);
return ret;
}
@@ -3521,7 +3520,7 @@ static void icl_put_dplls(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum icl_port_dpll_id id;
- new_crtc_state->shared_dpll = NULL;
+ new_crtc_state->intel_dpll = NULL;
for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
const struct icl_port_dpll *old_port_dpll =
@@ -3534,12 +3533,12 @@ static void icl_put_dplls(struct intel_atomic_state *state,
if (!old_port_dpll->pll)
continue;
- intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ intel_unreference_dpll(state, crtc, old_port_dpll->pll);
}
}
static bool mg_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3606,7 +3605,7 @@ out:
}
static bool dkl_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3678,7 +3677,7 @@ out:
}
static bool icl_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
{
@@ -3739,7 +3738,7 @@ out:
}
static bool combo_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
@@ -3748,14 +3747,14 @@ static bool combo_pll_get_hw_state(struct intel_display *display,
}
static bool tbt_pll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
static void icl_dpll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
@@ -3797,7 +3796,7 @@ static void icl_dpll_write(struct intel_display *display,
}
static void icl_mg_pll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
@@ -3840,7 +3839,7 @@ static void icl_mg_pll_write(struct intel_display *display,
}
static void dkl_pll_write(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
@@ -3905,7 +3904,7 @@ static void dkl_pll_write(struct intel_display *display,
}
static void icl_pll_power_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
@@ -3920,7 +3919,7 @@ static void icl_pll_power_enable(struct intel_display *display,
}
static void icl_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
@@ -3930,7 +3929,7 @@ static void icl_pll_enable(struct intel_display *display,
drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
-static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
+static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_dpll *pll)
{
u32 val;
@@ -3955,7 +3954,7 @@ static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct inte
}
static void combo_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -3979,7 +3978,7 @@ static void combo_pll_enable(struct intel_display *display,
}
static void tbt_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -4000,7 +3999,7 @@ static void tbt_pll_enable(struct intel_display *display,
}
static void mg_pll_enable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@@ -4025,7 +4024,7 @@ static void mg_pll_enable(struct intel_display *display,
}
static void icl_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
i915_reg_t enable_reg)
{
/* The first steps are done by intel_ddi_post_disable(). */
@@ -4056,7 +4055,7 @@ static void icl_pll_disable(struct intel_display *display,
}
static void combo_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
@@ -4064,13 +4063,13 @@ static void combo_pll_disable(struct intel_display *display,
}
static void tbt_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
static void mg_pll_disable(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
@@ -4129,21 +4128,21 @@ static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
}
-static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+static const struct intel_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
.get_freq = icl_ddi_combo_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+static const struct intel_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
.get_freq = icl_ddi_tbt_pll_get_freq,
};
-static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+static const struct intel_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
@@ -4191,7 +4190,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.compare_hw_state = icl_compare_hw_state,
};
-static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+static const struct intel_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
@@ -4300,12 +4299,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
};
/**
- * intel_shared_dpll_init - Initialize shared DPLLs
+ * intel_dpll_init - Initialize DPLLs
* @display: intel_display device
*
- * Initialize shared DPLLs for @display.
+ * Initialize DPLLs for @display.
*/
-void intel_shared_dpll_init(struct intel_display *display)
+void intel_dpll_init(struct intel_display *display)
{
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
@@ -4346,23 +4345,23 @@ void intel_shared_dpll_init(struct intel_display *display)
for (i = 0; dpll_info[i].name; i++) {
if (drm_WARN_ON(display->drm,
- i >= ARRAY_SIZE(display->dpll.shared_dplls)))
+ i >= ARRAY_SIZE(display->dpll.dplls)))
break;
/* must fit into unsigned long bitmask on 32bit */
if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
break;
- display->dpll.shared_dplls[i].info = &dpll_info[i];
- display->dpll.shared_dplls[i].index = i;
+ display->dpll.dplls[i].info = &dpll_info[i];
+ display->dpll.dplls[i].index = i;
}
display->dpll.mgr = dpll_mgr;
- display->dpll.num_shared_dpll = i;
+ display->dpll.num_dpll = i;
}
/**
- * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
+ * intel_dpll_compute - compute DPLL state for a CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to compute DPLLs for
* @encoder: encoder
@@ -4370,14 +4369,14 @@ void intel_shared_dpll_init(struct intel_display *display)
* This function computes the DPLL state for the given CRTC and encoder.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*
* Returns:
* 0 on success, negative error code on failure.
*/
-int intel_compute_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_dpll_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4389,7 +4388,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * intel_dpll_reserve - reserve DPLLs for CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to reserve DPLLs for
* @encoder: encoder
@@ -4399,18 +4398,18 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
* state.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*
* The reserved DPLLs should be released by calling
- * intel_release_shared_dplls().
+ * intel_dpll_release().
*
* Returns:
* 0 if all required DPLLs were successfully reserved,
* negative error code otherwise.
*/
-int intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_dpll_reserve(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4422,18 +4421,18 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
+ * intel_dpll_release - end use of DPLLs by CRTC in atomic state
* @state: atomic state
* @crtc: crtc from which the DPLLs are to be released
*
- * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * This function releases all DPLLs reserved by intel_dpll_reserve()
* from the current atomic commit @state and the old @crtc atomic state.
*
* The new configuration in the atomic commit @state is made effective by
- * calling intel_shared_dpll_swap_state().
+ * calling intel_dpll_swap_state().
*/
-void intel_release_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_dpll_release(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
@@ -4441,7 +4440,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
/*
* FIXME: this function is called for every platform having a
* compute_clock hook, even though the platform doesn't yet support
- * the shared DPLL framework and intel_reserve_shared_dplls() is not
+ * the DPLL framework and intel_dpll_reserve() is not
* called on those.
*/
if (!dpll_mgr)
@@ -4451,16 +4450,16 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
}
/**
- * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * intel_dpll_update_active - update the active DPLL for a CRTC/encoder
* @state: atomic state
* @crtc: the CRTC for which to update the active DPLL
* @encoder: encoder determining the type of port DPLL
*
* Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
- * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * from the port DPLLs reserved previously by intel_dpll_reserve(). The
* DPLL selected will be based on the current mode of the encoder's port.
*/
-void intel_update_active_dpll(struct intel_atomic_state *state,
+void intel_dpll_update_active(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
@@ -4482,7 +4481,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
* Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
*/
int intel_dpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
@@ -4500,14 +4499,14 @@ int intel_dpll_get_freq(struct intel_display *display,
* Read out @pll's hardware state into @dpll_hw_state.
*/
bool intel_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
}
static void readout_dpll_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
struct intel_crtc *crtc;
@@ -4521,8 +4520,8 @@ static void readout_dpll_hw_state(struct intel_display *display,
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
- intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
+ if (crtc_state->hw.active && crtc_state->intel_dpll == pll)
+ intel_dpll_crtc_get(crtc, pll, &pll->state);
}
pll->active_mask = pll->state.pipe_mask;
@@ -4539,15 +4538,15 @@ void intel_dpll_update_ref_clks(struct intel_display *display)
void intel_dpll_readout_hw_state(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
readout_dpll_hw_state(display, pll);
}
static void sanitize_dpll_state(struct intel_display *display,
- struct intel_shared_dpll *pll)
+ struct intel_dpll *pll)
{
if (!pll->on)
return;
@@ -4566,12 +4565,12 @@ static void sanitize_dpll_state(struct intel_display *display,
void intel_dpll_sanitize_state(struct intel_display *display)
{
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
intel_cx0_pll_power_save_wa(display);
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
sanitize_dpll_state(display, pll);
}
@@ -4623,7 +4622,7 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
static void
verify_single_dpll_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
@@ -4676,15 +4675,15 @@ verify_single_dpll_state(struct intel_display *display,
pll->info->name);
}
-static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
- const struct intel_shared_dpll *new_pll)
+static bool has_alt_port_dpll(const struct intel_dpll *old_pll,
+ const struct intel_dpll *new_pll)
{
return old_pll && new_pll && old_pll != new_pll &&
(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
-void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_dpll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
@@ -4692,34 +4691,34 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state->shared_dpll)
- verify_single_dpll_state(display, new_crtc_state->shared_dpll,
+ if (new_crtc_state->intel_dpll)
+ verify_single_dpll_state(display, new_crtc_state->intel_dpll,
crtc, new_crtc_state);
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ if (old_crtc_state->intel_dpll &&
+ old_crtc_state->intel_dpll != new_crtc_state->intel_dpll) {
u8 pipe_mask = BIT(crtc->pipe);
- struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
+ struct intel_dpll *pll = old_crtc_state->intel_dpll;
INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
- INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
- new_crtc_state->shared_dpll) &&
+ INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->intel_dpll,
+ new_crtc_state->intel_dpll) &&
pll->state.pipe_mask & pipe_mask,
"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
-void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
+void intel_dpll_verify_disabled(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
- for_each_shared_dpll(display, pll, i)
+ for_each_dpll(display, pll, i)
verify_single_dpll_state(display, pll, NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index caffb084830c..f131bdd1c975 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -30,18 +30,18 @@
#include "intel_display_power.h"
#include "intel_wakeref.h"
-#define for_each_shared_dpll(__display, __pll, __i) \
- for ((__i) = 0; (__i) < (__display)->dpll.num_shared_dpll && \
- ((__pll) = &(__display)->dpll.shared_dplls[(__i)]) ; (__i)++)
+#define for_each_dpll(__display, __pll, __i) \
+ for ((__i) = 0; (__i) < (__display)->dpll.num_dpll && \
+ ((__pll) = &(__display)->dpll.dplls[(__i)]) ; (__i)++)
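For reference, a minimal sketch of how the renamed iterator is used (illustrative only; display is assumed to be a valid struct intel_display pointer, and the fields referenced appear in the structures later in this header):

        struct intel_dpll *pll;
        int i;

        /* walk every DPLL tracked in display->dpll.dplls[] */
        for_each_dpll(display, pll, i)
                drm_dbg_kms(display->drm, "%s: active_mask=0x%x\n",
                            pll->info->name, pll->active_mask);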
enum tc_port;
struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dpll_funcs;
struct intel_encoder;
struct intel_shared_dpll;
-struct intel_shared_dpll_funcs;
/**
* enum intel_dpll_id - possible DPLL ids
@@ -280,7 +280,7 @@ struct intel_dpll_hw_state {
};
/**
- * struct intel_shared_dpll_state - hold the DPLL atomic state
+ * struct intel_dpll_state - hold the DPLL atomic state
*
* This structure holds an atomic state for the DPLL, that can represent
- * either its current state (in struct &intel_shared_dpll) or a desired
+ * either its current state (in struct &intel_dpll) or a desired
@@ -289,7 +289,7 @@ struct intel_dpll_hw_state {
*
- * See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
+ * See also intel_dpll_reserve() and intel_dpll_release().
*/
-struct intel_shared_dpll_state {
+struct intel_dpll_state {
/**
* @pipe_mask: mask of pipes using this DPLL, active or not
*/
@@ -314,7 +314,7 @@ struct dpll_info {
/**
* @funcs: platform specific hooks
*/
- const struct intel_shared_dpll_funcs *funcs;
+ const struct intel_dpll_funcs *funcs;
/**
* @id: unique identifier for this DPLL
@@ -344,16 +344,16 @@ struct dpll_info {
};
/**
- * struct intel_shared_dpll - display PLL with tracked state and users
+ * struct intel_dpll - display PLL with tracked state and users
*/
-struct intel_shared_dpll {
+struct intel_dpll {
/**
* @state:
*
* Store the state for the pll, including its hw state
* and CRTCs using it.
*/
- struct intel_shared_dpll_state state;
+ struct intel_dpll_state state;
/**
* @index: index for atomic state
@@ -387,41 +387,41 @@ struct intel_shared_dpll {
#define SKL_DPLL2 2
#define SKL_DPLL3 3
-/* shared dpll functions */
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct intel_display *display,
- enum intel_dpll_id id);
-void assert_shared_dpll(struct intel_display *display,
- struct intel_shared_dpll *pll,
- bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-int intel_compute_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
-int intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
-void intel_release_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
- const struct intel_shared_dpll *pll,
- struct intel_shared_dpll_state *shared_dpll_state);
+/* dpll functions */
+struct intel_dpll *
+intel_get_dpll_by_id(struct intel_display *display,
+ enum intel_dpll_id id);
+void assert_dpll(struct intel_display *display,
+ struct intel_dpll *pll,
+ bool state);
+#define assert_dpll_enabled(d, p) assert_dpll(d, p, true)
+#define assert_dpll_disabled(d, p) assert_dpll(d, p, false)
+int intel_dpll_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+int intel_dpll_reserve(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_dpll_release(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_dpll_crtc_put(const struct intel_crtc *crtc,
+ const struct intel_dpll *pll,
+ struct intel_dpll_state *shared_dpll_state);
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
enum icl_port_dpll_id port_dpll_id);
-void intel_update_active_dpll(struct intel_atomic_state *state,
+void intel_dpll_update_active(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
int intel_dpll_get_freq(struct intel_display *display,
- const struct intel_shared_dpll *pll,
+ const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_get_hw_state(struct intel_display *display,
- struct intel_shared_dpll *pll,
+ struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct intel_display *display);
+void intel_dpll_enable(const struct intel_crtc_state *crtc_state);
+void intel_dpll_disable(const struct intel_crtc_state *crtc_state);
+void intel_dpll_swap_state(struct intel_atomic_state *state);
+void intel_dpll_init(struct intel_display *display);
void intel_dpll_update_ref_clks(struct intel_display *display);
void intel_dpll_readout_hw_state(struct intel_display *display);
void intel_dpll_sanitize_state(struct intel_display *display);
@@ -435,8 +435,8 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
-void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state);
+void intel_dpll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_dpll_verify_disabled(struct intel_atomic_state *state);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 43bd97e4f589..aea249e2699f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,7 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
@@ -126,7 +127,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment)
{
struct drm_i915_private *i915 = vm->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
struct ref_tracker *wakeref;
struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index ce5aa0ca0fa5..5eb88d51dba1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt_common.h"
#include "skl_universal_plane_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 05cd0f6e6d71..0fdb32ef241c 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -5,10 +5,11 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_frontbuffer.h"
@@ -123,9 +124,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+ mod_delayed_work(display->wq.unordered, &crtc->drrs.work, msecs_to_jiffies(1000));
}
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 481488d1fe67..53d8ae3a70e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -7,11 +7,10 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_irq.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
@@ -94,6 +93,10 @@ struct intel_dsb {
/* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL 0xA
/* see DSB_REG_VALUE_MASK */
+#define DSB_OPCODE_GOSUB 0xC /* ptl+ */
+#define DSB_GOSUB_HEAD_SHIFT 26
+#define DSB_GOSUB_TAIL_SHIFT 0
+#define DSB_GOSUB_CONVERT_ADDR(x) ((x) >> 6)
static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -205,6 +208,15 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb)
crtc->base.base.id, crtc->base.name, dsb->id);
}
+static bool assert_dsb_tail_is_aligned(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+
+ return !drm_WARN_ON(display->drm,
+ !IS_ALIGNED(dsb->free_pos * 4, CACHELINE_BYTES));
+}
+
static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
@@ -229,13 +241,40 @@ static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}
+unsigned int intel_dsb_size(struct intel_dsb *dsb)
+{
+ return dsb->free_pos * 4;
+}
+
+unsigned int intel_dsb_head(struct intel_dsb *dsb)
+{
+ return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
+}
+
+static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
+{
+ return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + intel_dsb_size(dsb);
+}
+
+static void intel_dsb_ins_align(struct intel_dsb *dsb)
+{
+ /*
+ * Every instruction should be 8 byte aligned.
+ *
+ * The only way to get unaligned free_pos is via
+ * intel_dsb_reg_write_indexed() which already
+ * makes sure the next dword is zeroed, so no need
+ * to clear it here.
+ */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+}
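As a quick worked example of the alignment above: free_pos counts dwords, so ALIGN(free_pos, 2) rounds an odd position such as 13 (byte offset 52) up to 14 (byte offset 56), putting the next instruction on an 8-byte boundary.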
+
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
if (!assert_dsb_has_room(dsb))
return;
- /* Every instruction should be 8 byte aligned. */
- dsb->free_pos = ALIGN(dsb->free_pos, 2);
+ intel_dsb_ins_align(dsb);
dsb->ins_start_offset = dsb->free_pos;
dsb->ins[0] = ldw;
@@ -493,6 +532,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
+ intel_dsb_ins_align(dsb);
+
tail = dsb->free_pos * 4;
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
@@ -503,20 +544,90 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
dsb->free_pos = aligned_tail / 4;
}
-void intel_dsb_finish(struct intel_dsb *dsb)
+static void intel_dsb_gosub_align(struct intel_dsb *dsb)
+{
+ u32 aligned_tail, tail;
+
+ intel_dsb_ins_align(dsb);
+
+ tail = dsb->free_pos * 4;
+ aligned_tail = ALIGN(tail, CACHELINE_BYTES);
+
+ /*
+ * Wa_16024917128
+ * "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
+ */
+ if (aligned_tail - tail <= 2 * 8)
+ intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ aligned_tail - tail);
+
+ dsb->free_pos = aligned_tail / 4;
+}
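To make the 2 * 8 threshold above concrete: a 64-byte cacheline holds eight QW slots, an offset lands in slot (offset % 64) / 8, and slots 6 and 7 are exactly the last 16 bytes before the next cacheline boundary. A standalone sketch of that arithmetic (plain C, not driver code):

        #include <assert.h>
        #include <stdint.h>

        #define CACHELINE_BYTES 64

        /* QW slot (0-7) that a byte offset occupies within its cacheline */
        static unsigned int qw_slot(uint32_t offset)
        {
                return (offset % CACHELINE_BYTES) / 8;
        }

        int main(void)
        {
                assert(qw_slot(0x28) == 5); /* safe for GOSUB */
                assert(qw_slot(0x30) == 6); /* Wa_16024917128: forbidden */
                assert(qw_slot(0x38) == 7); /* Wa_16024917128: forbidden */
                return 0;
        }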
+
+void intel_dsb_gosub(struct intel_dsb *dsb,
+ struct intel_dsb *sub_dsb)
{
struct intel_crtc *crtc = dsb->crtc;
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+ unsigned int head, tail;
+ u64 head_tail;
+
+ if (drm_WARN_ON(display->drm, dsb->id != sub_dsb->id))
+ return;
+
+ if (!assert_dsb_tail_is_aligned(sub_dsb))
+ return;
+
+ intel_dsb_gosub_align(dsb);
+
+ head = intel_dsb_head(sub_dsb);
+ tail = intel_dsb_tail(sub_dsb);
/*
- * DSB_FORCE_DEWAKE remains active even after DSB is
- * disabled, so make sure to clear it (if set during
- * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
- * well for good measure.
+ * The GOSUB instruction has the following memory layout.
+ *
+ * +------------------------------------------------------------+
+ * | Opcode | Rsvd | Head Ptr | Tail Ptr |
+ * | 0x0c | | | |
+ * +------------------------------------------------------------+
+ * |<- 8bits->|<- 4bits ->|<-- 26bits -->|<-- 26bits -->|
+ *
+	 * We have only 26 bits each to represent the head and tail
+	 * pointers, even though the addresses themselves are 32 bits. This
+	 * is not a problem because the addresses are 64-byte aligned, so
+	 * their low 6 bits are always zero. Hence we right shift each
+	 * address by 6 before embedding it into the GOSUB instruction.
*/
- intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
- intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
- DSB_FORCE_DEWAKE, 0);
+ head_tail = ((u64)(DSB_GOSUB_CONVERT_ADDR(head)) << DSB_GOSUB_HEAD_SHIFT) |
+ ((u64)(DSB_GOSUB_CONVERT_ADDR(tail)) << DSB_GOSUB_TAIL_SHIFT);
+
+ intel_dsb_emit(dsb, lower_32_bits(head_tail),
+ (DSB_OPCODE_GOSUB << DSB_OPCODE_SHIFT) |
+ upper_32_bits(head_tail));
+
+ /*
+ * "NOTE: the instructions within the cacheline
+ * FOLLOWING the GOSUB instruction must be NOPs."
+ */
+ intel_dsb_align_tail(dsb);
+}
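A self-contained sketch of the head/tail packing performed above (the three defines are copied from this file; head is the sub-DSB's GGTT offset and tail is head plus its size, per intel_dsb_head()/intel_dsb_tail(), while the opcode is OR'd into the upper dword separately as in intel_dsb_gosub()):

        #include <assert.h>
        #include <stdint.h>

        #define DSB_GOSUB_HEAD_SHIFT            26
        #define DSB_GOSUB_TAIL_SHIFT            0
        #define DSB_GOSUB_CONVERT_ADDR(x)       ((x) >> 6)

        /* pack two 64-byte-aligned GGTT offsets into the GOSUB operand */
        static uint64_t gosub_pack(uint32_t head, uint32_t tail)
        {
                return ((uint64_t)DSB_GOSUB_CONVERT_ADDR(head) << DSB_GOSUB_HEAD_SHIFT) |
                       ((uint64_t)DSB_GOSUB_CONVERT_ADDR(tail) << DSB_GOSUB_TAIL_SHIFT);
        }

        int main(void)
        {
                uint64_t v = gosub_pack(0x10000, 0x10040);

                /* both pointers survive the >> 6 thanks to the alignment */
                assert((((v >> DSB_GOSUB_HEAD_SHIFT) & 0x3ffffff) << 6) == 0x10000);
                assert((((v >> DSB_GOSUB_TAIL_SHIFT) & 0x3ffffff) << 6) == 0x10040);
                return 0;
        }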
+
+void intel_dsb_gosub_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
+
+ /*
+ * Wa_16024917128
+ * "Ensure that all subroutines called by GOSUB end with a cacheline of NOPs"
+ */
+ intel_dsb_noop(dsb, 8);
+
+ intel_dsb_buffer_flush_map(&dsb->dsb_buf);
+}
+
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
intel_dsb_align_tail(dsb);
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
@@ -539,6 +650,9 @@ static u32 dsb_error_int_status(struct intel_display *display)
if (DISPLAY_VER(display) >= 14)
errors |= DSB_ATS_FAULT_INT_STATUS;
+ if (DISPLAY_VER(display) >= 30)
+ errors |= DSB_GOSUB_INT_STATUS;
+
return errors;
}
@@ -553,17 +667,46 @@ static u32 dsb_error_int_en(struct intel_display *display)
if (DISPLAY_VER(display) >= 14)
errors |= DSB_ATS_FAULT_INT_EN;
+ /*
+ * Wa_16024917128
+ * "Disable nested GOSUB interrupt (DSB_INTERRUPT bit 21)"
+ */
+ if (0 && DISPLAY_VER(display) >= 30)
+ errors |= DSB_GOSUB_INT_EN;
+
return errors;
}
+/*
+ * FIXME calibrate these sensibly, ideally compute based on
+ * the number of registers to be written. But that requires
+ * measuring the actual DSB execution speed on each platform
+ * (and the speed also depends on CDCLK and memory clock)...
+ */
+static int intel_dsb_noarm_exec_time_us(void)
+{
+ return 80;
+}
+
+static int intel_dsb_arm_exec_time_us(void)
+{
+ return 20;
+}
+
+int intel_dsb_exec_time_us(void)
+{
+ return intel_dsb_noarm_exec_time_us() +
+ intel_dsb_arm_exec_time_us();
+}
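To put these estimates in context: intel_dsb_vblank_evade() below converts the 20 us arm-phase figure into scanlines. Assuming intel_usecs_to_scanlines() implements the usual conversion, scanlines = DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * htotal), a 4k@60 mode (crtc_clock 594000 kHz, htotal 4400) gives DIV_ROUND_UP(20 * 594000, 4400000) = 3 scanlines of evasion margin.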
+
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- /* FIXME calibrate sensibly */
- int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
+ int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ intel_dsb_arm_exec_time_us());
int start, end;
/*
@@ -605,13 +748,11 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state->base.dev);
struct intel_crtc *crtc = dsb->crtc;
enum pipe pipe = crtc->pipe;
- u32 tail;
if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
return;
- tail = chained_dsb->free_pos * 4;
- if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ if (!assert_dsb_tail_is_aligned(chained_dsb))
return;
intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
@@ -631,13 +772,15 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
DSB_ENABLE_DEWAKE |
DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
+ } else {
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id), 0);
}
intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
- intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));
+ intel_dsb_head(chained_dsb));
intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
- intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);
+ intel_dsb_tail(chained_dsb));
if (ctrl & DSB_WAIT_FOR_VBLANK) {
/*
@@ -652,6 +795,13 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_wait_scanline_out(state, dsb,
dsb_dewake_scanline_start(state, crtc),
dsb_dewake_scanline_end(state, crtc));
+
+ /*
+ * DSB_FORCE_DEWAKE remains active even after DSB is
+ * disabled, so make sure to clear it.
+ */
+ intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
+ DSB_FORCE_DEWAKE, 0);
}
}
@@ -676,16 +826,19 @@ void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
intel_dsb_wait_usec(dsb, usecs);
}
-static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
- int hw_dewake_scanline)
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: DSB context
+ *
+ * This function performs the actual register writes to the hardware via the DSB.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 tail;
- tail = dsb->free_pos * 4;
- if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ if (!assert_dsb_tail_is_aligned(dsb))
return;
if (is_dsb_busy(display, pipe, dsb->id)) {
@@ -695,7 +848,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
}
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
- ctrl | DSB_ENABLE);
+ DSB_ENABLE);
intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
dsb->chicken);
@@ -704,45 +857,13 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
dsb_error_int_en(display) | DSB_PROG_INT_EN);
- intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
- intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
-
- if (hw_dewake_scanline >= 0) {
- int diff, position;
+ intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id), 0);
- intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
- DSB_ENABLE_DEWAKE |
- DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
-
- /*
- * Force DEwake immediately if we're already past
- * or close to racing past the target scanline.
- */
- position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
-
- diff = hw_dewake_scanline - position;
- intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
- (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
- DSB_BLOCK_DEWAKE_EXTENSION);
- }
+ intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
+ intel_dsb_head(dsb));
intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
- intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
-}
-
-/**
- * intel_dsb_commit() - Trigger workload execution of DSB.
- * @dsb: DSB context
- * @wait_for_vblank: wait for vblank before executing
- *
- * This function is used to do actual write to hardware using DSB.
- */
-void intel_dsb_commit(struct intel_dsb *dsb,
- bool wait_for_vblank)
-{
- _intel_dsb_commit(dsb,
- wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
- wait_for_vblank ? dsb->hw_dewake_scanline : -1);
+ intel_dsb_tail(dsb));
}
void intel_dsb_wait(struct intel_dsb *dsb)
@@ -895,4 +1016,7 @@ void intel_dsb_irq_handler(struct intel_display *display,
if (errors & DSB_POLL_ERR_INT_STATUS)
drm_err(display->drm, "[CRTC:%d:%s] DSB %d poll error\n",
crtc->base.base.id, crtc->base.name, dsb_id);
+ if (errors & DSB_GOSUB_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d GOSUB programming error\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index e843c52bf97c..c8f4499916eb 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -26,12 +26,16 @@ enum intel_dsb_id {
I915_MAX_DSBS,
};
+unsigned int intel_dsb_size(struct intel_dsb *dsb);
+unsigned int intel_dsb_head(struct intel_dsb *dsb);
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc,
enum intel_dsb_id dsb_id,
unsigned int max_cmds);
void intel_dsb_finish(struct intel_dsb *dsb);
+void intel_dsb_gosub_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
+int intel_dsb_exec_time_us(void);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
@@ -57,13 +61,14 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
void intel_dsb_poll(struct intel_dsb *dsb,
i915_reg_t reg, u32 mask, u32 val,
int wait_us, int count);
+void intel_dsb_gosub(struct intel_dsb *dsb,
+ struct intel_dsb *sub_dsb);
void intel_dsb_chain(struct intel_atomic_state *state,
struct intel_dsb *dsb,
struct intel_dsb *chained_dsb,
bool wait_for_vblank);
-void intel_dsb_commit(struct intel_dsb *dsb,
- bool wait_for_vblank);
+void intel_dsb_commit(struct intel_dsb *dsb);
void intel_dsb_wait(struct intel_dsb *dsb);
void intel_dsb_irq_handler(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_regs.h b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
index cb6e0e5624a6..230104f36145 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb_regs.h
@@ -51,11 +51,13 @@
#define DSB_RESET_SM_STATE_MASK REG_GENMASK(5, 4)
#define DSB_RUN_SM_STATE_MASK REG_GENMASK(2, 0)
#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_GOSUB_INT_EN REG_BIT(21) /* ptl+ */
#define DSB_ATS_FAULT_INT_EN REG_BIT(20) /* mtl+ */
#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
#define DSB_POLL_ERR_INT_EN REG_BIT(17)
#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_GOSUB_INT_STATUS REG_BIT(5) /* ptl+ */
#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) /* mtl+ */
#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 29c920983413..e6a851d276f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,12 +36,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-
#include <video/mipi_display.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index b61520353c92..08b48e36aca6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,11 +34,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c
index 21d638535497..0b7bd26f4339 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.c
+++ b/drivers/gpu/drm/i915/display/intel_encoder.c
@@ -5,8 +5,7 @@
#include <linux/workqueue.h>
-#include "i915_drv.h"
-
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_encoder.h"
@@ -32,9 +31,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder)
void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mod_delayed_work(i915->unordered_wq,
+ mod_delayed_work(display->wq.unordered,
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 05393bd60c98..0da842bd2f2f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -11,14 +11,15 @@
#include <drm/drm_modeset_helper.h>
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
#include "intel_bo.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@@ -421,21 +422,22 @@ unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
/**
- * intel_fb_get_format_info: Get a modifier specific format information
+ * intel_fb_get_format_info: Get modifier specific format information
- * @cmd: FB add command structure
+ * @pixel_format: pixel format
+ * @modifier: modifier
*
* Returns:
- * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
+ * Returns the format information for @pixel_format specific to @modifier,
* or %NULL if the modifier doesn't override the format.
*/
const struct drm_format_info *
-intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+intel_fb_get_format_info(u32 pixel_format, u64 modifier)
{
- const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
if (!md || !md->formats)
return NULL;
- return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
+ return lookup_format_info(md->formats, md->format_count, pixel_format);
}
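An illustrative call pattern for the new signature (mirroring how the intel_fbdev_fb.c hunk later in this patch falls back to the core lookup; the format and modifier values here are arbitrary examples):

        const struct drm_format_info *info;

        /* NULL means the modifier does not override the base format */
        info = intel_fb_get_format_info(DRM_FORMAT_NV12, I915_FORMAT_MOD_4_TILED);
        if (!info)
                info = drm_get_format_info(display->drm, DRM_FORMAT_NV12,
                                           I915_FORMAT_MOD_4_TILED);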
static bool plane_caps_contain_any(u8 caps, u8 mask)
@@ -1285,10 +1287,10 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- return DISPLAY_VER(dev_priv) < 4 ||
+ return DISPLAY_VER(display) < 4 ||
(plane->fbc && !plane_state->no_fbc_reason &&
plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
@@ -2205,6 +2207,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_display *display = to_intel_display(obj->dev);
@@ -2252,7 +2255,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err_frontbuffer_put;
}
- drm_helper_mode_fill_fb_struct(display->drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++) {
unsigned int stride_alignment;
@@ -2322,6 +2325,7 @@ err:
struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_framebuffer *fb;
@@ -2332,7 +2336,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (IS_ERR(obj))
return ERR_CAST(obj);
- fb = intel_framebuffer_create(obj, &mode_cmd);
+ fb = intel_framebuffer_create(obj, info, &mode_cmd);
drm_gem_object_put(obj);
return fb;
@@ -2340,16 +2344,17 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+ ret = intel_framebuffer_init(intel_fb, obj, info, mode_cmd);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index bdd76b372957..403b8b63721a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -47,7 +47,7 @@ u64 *intel_fb_plane_get_modifiers(struct intel_display *display,
bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
const struct drm_format_info *
-intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+intel_fb_get_format_info(u32 pixel_format, u64 modifier);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
@@ -102,13 +102,16 @@ void intel_add_fb_offsets(int *x, int *y,
int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index 3d338a728354..b0e8b89f7ce8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c648ab8a93d7..5a0151775a3a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -11,12 +11,13 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_plane.h"
static struct i915_vma *
intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
@@ -333,3 +334,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_dpt_unpin_from_ggtt(fb->dpt_vm);
}
}
+
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
+{
+ iosys_map_set_vaddr_iomem(map, i915_vma_get_iomap(vma));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index 01770dbba2e0..81ab79da1af7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -12,6 +12,7 @@ struct drm_framebuffer;
struct i915_vma;
struct intel_plane_state;
struct i915_gtt_view;
+struct iosys_map;
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
@@ -27,5 +28,6 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags);
int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
const struct intel_plane_state *old_plane_state);
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index bed2bba20b55..6e26cb4c5724 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -45,9 +45,10 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_stolen.h"
+
#include "gt/intel_gt_types.h"
+
#include "i915_drv.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
@@ -55,6 +56,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -1574,7 +1576,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
+ if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) {
plane_state->no_fbc_reason = "pixel rate too high";
return 0;
}
@@ -2009,7 +2011,7 @@ void intel_fbc_reset_underrun(struct intel_display *display)
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = to_i915(fbc->display->drm);
+ struct intel_display *display = fbc->display;
/*
* There's no guarantee that underrun_detected won't be set to true
@@ -2022,7 +2024,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- queue_work(i915->unordered_wq, &fbc->underrun_work);
+ queue_work(display->wq.unordered, &fbc->underrun_work);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 2dc4029d71ed..7c4709d58aa3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -512,3 +512,8 @@ struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
{
return fbdev ? fbdev->vma : NULL;
}
+
+void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
+{
+ intel_fb_get_map(fbdev->vma, map);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index a15e3e222a0c..150cc5f45bb3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -13,6 +13,7 @@ struct drm_fb_helper_surface_size;
struct intel_display;
struct intel_fbdev;
struct intel_framebuffer;
+struct iosys_map;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
@@ -22,7 +23,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
void intel_fbdev_setup(struct intel_display *display);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
-
+void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map);
#else
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
@@ -39,6 +40,9 @@ static inline struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev
return NULL;
}
+static inline void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map)
+{
+}
#endif
#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 5f4cb3328265..210aee9ae88b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbdev_fb.h"
@@ -61,7 +62,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd);
+ fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj),
+ drm_get_format_info(display->drm,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
i915_gem_object_put(obj);
return to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 169bbe154b5c..8039a84671cc 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -14,6 +14,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fdi.h"
@@ -910,7 +911,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ drm_WARN_ON(display->drm, crtc_state->intel_dpll->info->id != DPLL_ID_SPLL);
intel_ddi_enable_clock(encoder, crtc_state);
/* Start the training iterating through available voltages and emphasis,
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 2a787897b2d3..c2ce8461ac9e 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -30,6 +30,7 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
new file mode 100644
index 000000000000..6ab2272ab2df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_step.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_flipq.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+#include "intel_dsb.h"
+#include "intel_vblank.h"
+#include "intel_vrr.h"
+
+/**
+ * DOC: DMC Flip Queue
+ *
+ * A flip queue is a ring buffer implemented by the pipe DMC firmware.
+ * The driver inserts entries into the queues to be executed by the
+ * pipe DMC at a specified presentation timestamp (PTS).
+ *
+ * Each pipe DMC provides several queues:
+ *
+ * - 1 general queue (two DSB buffers executed per entry)
+ * - 3 plane queues (one DSB buffer executed per entry)
+ * - 1 fast queue (deprecated)
+ */
+
+#define for_each_flipq(flipq_id) \
+ for ((flipq_id) = INTEL_FLIPQ_PLANE_1; (flipq_id) < MAX_INTEL_FLIPQ; (flipq_id)++)
+
+static int intel_flipq_offset(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ return 0x008;
+ case INTEL_FLIPQ_PLANE_2:
+ return 0x108;
+ case INTEL_FLIPQ_PLANE_3:
+ return 0x208;
+ case INTEL_FLIPQ_GENERAL:
+ return 0x308;
+ case INTEL_FLIPQ_FAST:
+ return 0x3c8;
+ default:
+ MISSING_CASE(flipq_id);
+ return 0;
+ }
+}
+
+static int intel_flipq_size_dw(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ return 64;
+ case INTEL_FLIPQ_GENERAL:
+ case INTEL_FLIPQ_FAST:
+ return 48;
+ default:
+ MISSING_CASE(flipq_id);
+ return 1;
+ }
+}
+
+static int intel_flipq_elem_size_dw(enum intel_flipq_id flipq_id)
+{
+ switch (flipq_id) {
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ return 4;
+ case INTEL_FLIPQ_GENERAL:
+ case INTEL_FLIPQ_FAST:
+ return 6;
+ default:
+ MISSING_CASE(flipq_id);
+ return 1;
+ }
+}
+
+static int intel_flipq_size_entries(enum intel_flipq_id flipq_id)
+{
+ return intel_flipq_size_dw(flipq_id) / intel_flipq_elem_size_dw(flipq_id);
+}
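Plugging in the sizes above: each plane queue holds 64 / 4 = 16 entries, while the general and fast queues hold 48 / 6 = 8 entries apiece.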
+
+static void intel_flipq_crtc_init(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum intel_flipq_id flipq_id;
+
+ for_each_flipq(flipq_id) {
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ flipq->start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc) + intel_flipq_offset(flipq_id);
+ flipq->flipq_id = flipq_id;
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] FQ %d: start 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ flipq_id, flipq->start_mmioaddr);
+ }
+}
+
+bool intel_flipq_supported(struct intel_display *display)
+{
+ if (!display->params.enable_flipq)
+ return false;
+
+ if (!display->dmc.dmc)
+ return false;
+
+ if (DISPLAY_VER(display) == 20)
+ return true;
+
+ /* DMC firmware expects VRR timing generator to be used */
+ return DISPLAY_VER(display) >= 30 && intel_vrr_always_use_vrr_tg(display);
+}
+
+void intel_flipq_init(struct intel_display *display)
+{
+ struct intel_crtc *crtc;
+
+ intel_dmc_wait_fw_load(display);
+
+ for_each_intel_crtc(display->drm, crtc)
+ intel_flipq_crtc_init(crtc);
+}
+
+static int cdclk_factor(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 30)
+ return 120;
+ else
+ return 280;
+}
+
+int intel_flipq_exec_time_us(struct intel_display *display)
+{
+ return intel_dsb_exec_time_us() +
+ DIV_ROUND_UP(display->cdclk.hw.cdclk * cdclk_factor(display), 540000) +
+ display->sagv.block_time_us;
+}
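As a worked example with illustrative numbers: on a DISPLAY_VER >= 30 part running CDCLK at 480000 kHz with a hypothetical 20 us SAGV block time, this evaluates to 100 + DIV_ROUND_UP(480000 * 120, 540000) + 20 = 100 + 107 + 20 = 227 us, the 100 us being intel_dsb_exec_time_us() from the intel_dsb.c hunk above.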
+
+static int intel_flipq_preempt_timeout_ms(struct intel_display *display)
+{
+ return DIV_ROUND_UP(intel_flipq_exec_time_us(display), 1000);
+}
+
+static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_rmw(display, PIPEDMC_FQ_CTRL(crtc->pipe),
+ PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0);
+
+ if (preempt &&
+ intel_de_wait_for_clear(display,
+ PIPEDMC_FQ_STATUS(crtc->pipe),
+ PIPEDMC_FQ_STATUS_BUSY,
+ intel_flipq_preempt_timeout_ms(display)))
+ drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n",
+ crtc->base.base.id, crtc->base.name);
+}
+
+static int intel_flipq_current_head(struct intel_crtc *crtc, enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ return intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id));
+}
+
+static void intel_flipq_write_tail(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe),
+ PIPEDMC_FPQ_PLANEQ_3_TP(crtc->flipq[INTEL_FLIPQ_PLANE_3].tail) |
+ PIPEDMC_FPQ_PLANEQ_2_TP(crtc->flipq[INTEL_FLIPQ_PLANE_2].tail) |
+ PIPEDMC_FPQ_PLANEQ_1_TP(crtc->flipq[INTEL_FLIPQ_PLANE_1].tail) |
+ PIPEDMC_FPQ_FASTQ_TP(crtc->flipq[INTEL_FLIPQ_FAST].tail) |
+ PIPEDMC_FPQ_GENERALQ_TP(crtc->flipq[INTEL_FLIPQ_GENERAL].tail));
+}
+
+static void intel_flipq_sw_dmc_wake(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_de_write(display, PIPEDMC_FPQ_CTL1(crtc->pipe), PIPEDMC_SW_DMC_WAKE);
+}
+
+static int intel_flipq_exec_time_lines(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ intel_flipq_exec_time_us(display));
+}
+
+void intel_flipq_dump(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+ u32 tmp;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d @ 0x%x: ",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ flipq->start_mmioaddr);
+	for (int i = 0; i < intel_flipq_size_dw(flipq_id); i++) {
+ printk(KERN_CONT " 0x%08x",
+ intel_de_read(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, i)));
+ if (i % intel_flipq_elem_size_dw(flipq_id) == intel_flipq_elem_size_dw(flipq_id) - 1)
+ printk(KERN_CONT "\n");
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d: chp=0x%x, hp=0x%x\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id)),
+ intel_de_read(display, PIPEDMC_FPQ_HP(crtc->pipe, flipq_id)));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] FQ %d: current head %d\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ intel_flipq_current_head(crtc, flipq_id));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] flip queue timestamp: 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe)));
+
+ tmp = intel_de_read(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe));
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] flip queue atomic tails: P3 %d, P2 %d, P1 %d, G %d, F %d\n",
+ crtc->base.base.id, crtc->base.name,
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_GENERALQ_TP_MASK, tmp),
+ REG_FIELD_GET(PIPEDMC_FPQ_FASTQ_TP_MASK, tmp));
+}
+
+void intel_flipq_reset(struct intel_display *display, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ enum intel_flipq_id flipq_id;
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(pipe), 0);
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(pipe), 0);
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(pipe), 0);
+
+ for_each_flipq(flipq_id) {
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ intel_de_write(display, PIPEDMC_FPQ_HP(pipe, flipq_id), 0);
+ intel_de_write(display, PIPEDMC_FPQ_CHP(pipe, flipq_id), 0);
+
+ flipq->tail = 0;
+ }
+
+ intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe), 0);
+}
+
+static enum pipedmc_event_id flipq_event_id(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 30)
+ return PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER;
+ else
+ return PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER;
+}
+
+void intel_flipq_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ /* FIXME what to do with VRR? */
+ int scanline = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
+ intel_flipq_exec_time_lines(crtc_state);
+
+ if (DISPLAY_VER(display) >= 30) {
+ u32 start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc);
+
+ /* undocumented magic DMC variables */
+ intel_de_write(display, PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr),
+ intel_flipq_exec_time_lines(crtc_state));
+ intel_de_write(display, PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr),
+ 100);
+ }
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe),
+ PIPEDMC_SCANLINE_UPPER(scanline));
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe),
+ PIPEDMC_SCANLINEINRANGECMP_EN |
+ PIPEDMC_SCANLINE_LOWER(scanline - 2));
+
+ intel_pipedmc_enable_event(crtc, flipq_event_id(display));
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), PIPEDMC_FQ_CTRL_ENABLE);
+}
+
+void intel_flipq_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_flipq_preempt(crtc, true);
+
+ intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), 0);
+
+ intel_pipedmc_disable_event(crtc, flipq_event_id(display));
+
+ intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe), 0);
+ intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe), 0);
+}
+
+static bool assert_flipq_has_room(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+ int head, size = intel_flipq_size_entries(flipq_id);
+
+ head = intel_flipq_current_head(crtc, flipq_id);
+
+ return !drm_WARN(display->drm,
+ (flipq->tail + size - head) % size >= size - 1,
+ "[CRTC:%d:%s] FQ %d overflow (head %d, tail %d, size %d)\n",
+ crtc->base.base.id, crtc->base.name, flipq_id,
+ head, flipq->tail, size);
+}
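A standalone sketch of the occupancy test used above (the modular arithmetic only; the driver reads head from the hardware as shown). The queue deliberately keeps one slot free, so a 16-entry queue is declared full at 15 queued entries:

        #include <assert.h>
        #include <stdbool.h>

        /* true if inserting one more entry would overrun the consumer */
        static bool flipq_is_full(int head, int tail, int size)
        {
                return (tail + size - head) % size >= size - 1;
        }

        int main(void)
        {
                /* 16-entry plane queue: empty, partially full, and full */
                assert(!flipq_is_full(0, 0, 16));       /* empty */
                assert(!flipq_is_full(2, 9, 16));       /* 7 entries queued */
                assert(flipq_is_full(3, 2, 16));        /* one free slot left */
                return 0;
        }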
+
+static void intel_flipq_write(struct intel_display *display,
+ struct intel_flipq *flipq, u32 data, int i)
+{
+ intel_de_write(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, flipq->tail *
+ intel_flipq_elem_size_dw(flipq->flipq_id) + i), data);
+}
+
+static void lnl_flipq_add(struct intel_display *display,
+ struct intel_flipq *flipq,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ int i = 0;
+
+ switch (flipq->flipq_id) {
+ case INTEL_FLIPQ_GENERAL:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
+ LNL_FQ_DSB_ID(dsb_id) |
+ LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
+ intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
+ break;
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
+ LNL_FQ_DSB_ID(dsb_id) |
+ LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ break;
+ default:
+ MISSING_CASE(flipq->flipq_id);
+ return;
+ }
+}
+
+static void ptl_flipq_add(struct intel_display *display,
+ struct intel_flipq *flipq,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ int i = 0;
+
+ switch (flipq->flipq_id) {
+ case INTEL_FLIPQ_GENERAL:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
+ PTL_FQ_DSB_ID(dsb_id) |
+ PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
+ intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
+ break;
+ case INTEL_FLIPQ_PLANE_1:
+ case INTEL_FLIPQ_PLANE_2:
+ case INTEL_FLIPQ_PLANE_3:
+ intel_flipq_write(display, flipq, pts, i++);
+ intel_flipq_write(display, flipq, 0, i++);
+ intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
+ PTL_FQ_DSB_ID(dsb_id) |
+ PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
+ intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
+ break;
+ default:
+ MISSING_CASE(flipq->flipq_id);
+ return;
+ }
+}
+
+void intel_flipq_add(struct intel_crtc *crtc,
+ enum intel_flipq_id flipq_id,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_flipq *flipq = &crtc->flipq[flipq_id];
+
+ if (!assert_flipq_has_room(crtc, flipq_id))
+ return;
+
+ pts += intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe));
+
+ intel_flipq_preempt(crtc, true);
+
+ if (DISPLAY_VER(display) >= 30)
+ ptl_flipq_add(display, flipq, pts, dsb_id, dsb);
+ else
+ lnl_flipq_add(display, flipq, pts, dsb_id, dsb);
+
+ flipq->tail = (flipq->tail + 1) % intel_flipq_size_entries(flipq->flipq_id);
+ intel_flipq_write_tail(crtc);
+
+ intel_flipq_preempt(crtc, false);
+
+ intel_flipq_sw_dmc_wake(crtc);
+}
+
+/* Wa_18034343758 */
+static bool need_dmc_halt_wa(struct intel_display *display)
+{
+ return DISPLAY_VER(display) == 20 ||
+ (display->platform.pantherlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
+}
+
+void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (need_dmc_halt_wa(display))
+ intel_dsb_wait_usec(dsb, 2);
+}
+
+void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (need_dmc_halt_wa(display))
+ intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.h b/drivers/gpu/drm/i915/display/intel_flipq.h
new file mode 100644
index 000000000000..012e3e9a6bcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_flipq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_FLIPQ_H__
+#define __INTEL_FLIPQ_H__
+
+#include <linux/types.h>
+
+enum intel_dsb_id;
+enum intel_flipq_id;
+enum pipe;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_display;
+struct intel_dsb;
+
+bool intel_flipq_supported(struct intel_display *display);
+void intel_flipq_init(struct intel_display *display);
+void intel_flipq_reset(struct intel_display *display, enum pipe pipe);
+
+void intel_flipq_enable(const struct intel_crtc_state *crtc_state);
+void intel_flipq_disable(const struct intel_crtc_state *old_crtc_state);
+
+void intel_flipq_add(struct intel_crtc *crtc,
+ enum intel_flipq_id flip_queue_id,
+ unsigned int pts,
+ enum intel_dsb_id dsb_id,
+ struct intel_dsb *dsb);
+int intel_flipq_exec_time_us(struct intel_display *display);
+void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc);
+void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc);
+void intel_flipq_dump(struct intel_crtc *crtc,
+ enum intel_flipq_id flip_queue_id);
+
+#endif /* __INTEL_FLIPQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d55cc77650b7..0d73f32fe7f1 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -37,6 +37,7 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
@@ -414,7 +415,7 @@ gmbus_wait_idle(struct intel_display *display)
add_wait_queue(&display->gmbus.wait_queue, &wait);
intel_de_write_fw(display, GMBUS4(display), irq_enable);
- ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
index 59bad1dda6d6..ab750562566b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_GMBUS_REGS_H__
#define __INTEL_GMBUS_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define __GMBUS_MMIO_BASE(__display) ((__display)->gmbus.mmio_base)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 3e3038f4ee1f..42202c8bb066 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -14,14 +14,16 @@
#include <linux/random.h>
#include <drm/display/drm_hdcp_helper.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
@@ -31,6 +33,7 @@
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
+#include "intel_step.h"
#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
@@ -373,7 +376,6 @@ static void intel_hdcp_clear_keys(struct intel_display *display)
static int intel_hdcp_load_keys(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 val;
@@ -398,7 +400,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* Mailbox interface.
*/
if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
- ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(display->drm,
"Failed to initiate HDCP key load (%d)\n",
@@ -1088,7 +1090,6 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
u64 value, bool update_property)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1109,7 +1110,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
}
}
@@ -2236,16 +2237,15 @@ static void intel_hdcp_check_work(struct work_struct *work)
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_connector_is_unregistered(&connector->base))
return;
if (!intel_hdcp2_check_link(connector))
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
@@ -2436,7 +2436,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2495,7 +2494,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
if (!ret) {
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work,
check_link_interval);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
@@ -2566,7 +2565,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool content_protection_type_changed, desired_and_not_enabled = false;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (!connector->hdcp.shim)
return;
@@ -2593,7 +2592,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
mutex_unlock(&hdcp->mutex);
}
@@ -2611,7 +2610,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ if (!queue_work(display->wq.unordered, &hdcp->prop_work))
drm_connector_put(&connector->base);
}
@@ -2735,7 +2734,6 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!hdcp->shim)
return;
@@ -2743,7 +2741,7 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
atomic_inc(&connector->hdcp.cp_irq_count);
wake_up_all(&connector->hdcp.cp_irq_queue);
- queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
+ queue_delayed_work(display->wq.unordered, &hdcp->check_work, 0);
}
static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
index f590d7f48ba7..112ce8c896d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -247,7 +247,7 @@
_TRANSA_HDCP2_STREAM_STATUS, \
_TRANSB_HDCP2_STREAM_STATUS)
#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
-#define STREAM_TYPE_STATUS REG_BIT(30)
+#define STREAM_TYPE_STATUS_MASK REG_GENMASK(30, 30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
@@ -263,7 +263,7 @@
#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
_TRANSA_HDCP2_AUTH_STREAM, \
_TRANSB_HDCP2_AUTH_STREAM)
-#define AUTH_STREAM_TYPE REG_BIT(31)
+#define AUTH_STREAM_TYPE_MASK REG_GENMASK(31, 31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
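Switching STREAM_TYPE_STATUS and AUTH_STREAM_TYPE from REG_BIT() to single-bit REG_GENMASK() masks lets callers treat the HDCP stream type as a register field rather than open-coding the bit. A hedged sketch of the intended usage with REG_FIELD_PREP() (the helper name is hypothetical; stream_type must fit the one-bit field):

	/* Illustrative sketch, not part of the patch. */
	static void hdcp2_set_stream_type(struct intel_display *display,
					  enum transcoder trans, u8 stream_type)
	{
		u32 val;

		val = intel_de_read(display, TRANS_HDCP2_AUTH_STREAM(trans));
		val &= ~AUTH_STREAM_TYPE_MASK;
		val |= REG_FIELD_PREP(AUTH_STREAM_TYPE_MASK, stream_type);
		intel_de_write(display, TRANS_HDCP2_AUTH_STREAM(trans), val);
	}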
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 98033471902c..9961ff259298 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -41,11 +41,9 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
-
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -54,6 +52,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index fc5d8928c37e..265aa97fcc75 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,8 +30,10 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
@@ -191,40 +193,34 @@ static bool detection_work_enabled(struct intel_display *display)
static bool
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
lockdep_assert_held(&display->irq.lock);
if (!detection_work_enabled(display))
return false;
- return mod_delayed_work(i915->unordered_wq, work, delay);
+ return mod_delayed_work(display->wq.unordered, work, delay);
}
static bool
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
lockdep_assert_held(&display->irq.lock);
if (!detection_work_enabled(display))
return false;
- return queue_delayed_work(i915->unordered_wq, work, delay);
+ return queue_delayed_work(display->wq.unordered, work, delay);
}
static bool
queue_detection_work(struct intel_display *display, struct work_struct *work)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
lockdep_assert_held(&display->irq.lock);
if (!detection_work_enabled(display))
return false;
- return queue_work(i915->unordered_wq, work);
+ return queue_work(display->wq.unordered, work);
}
static void
@@ -905,9 +901,14 @@ void intel_hpd_poll_enable(struct intel_display *display)
*/
void intel_hpd_poll_disable(struct intel_display *display)
{
+ struct intel_encoder *encoder;
+
if (!HAS_DISPLAY(display))
return;
+ for_each_intel_dp(display->drm, encoder)
+ intel_dp_dpcd_set_probe(enc_to_intel_dp(encoder), true);
+
WRITE_ONCE(display->hotplug.poll_enabled, false);
spin_lock_irq(&display->irq.lock);
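The three detection-work wrappers above share one pattern: the caller holds display->irq.lock, and work is queued on display->wq.unordered only while detection work is enabled, so teardown can flush pending work and fence off new submissions. A condensed sketch of that pattern (names are hypothetical):

	static bool queue_guarded_work(struct workqueue_struct *wq,
				       struct work_struct *work,
				       spinlock_t *lock, const bool *enabled)
	{
		lockdep_assert_held(lock);

		/* refuse new work once detection has been disabled */
		if (!*enabled)
			return false;

		return queue_work(wq, work);
	}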
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index c024b42369c8..43aee70597bf 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -9,6 +9,7 @@
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hti_regs.h b/drivers/gpu/drm/i915/display/intel_hti_regs.h
index e206f2837fc8..39c046bd351c 100644
--- a/drivers/gpu/drm/i915/display/intel_hti_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hti_regs.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_HTI_REGS_H__
#define __INTEL_HTI_REGS_H__
-#include "i915_reg_defs.h"
+#include "intel_display_reg_defs.h"
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index a10cd3992607..3caef7f9c7c4 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -3,6 +3,11 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/int_log.h>
+#include <linux/math.h>
+
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
@@ -10,11 +15,33 @@
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
+static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc)
+{
+ struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ int force_bpp_x16 = INT_MAX;
+ int i;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->base.crtc != &crtc->base)
+ continue;
+
+ if (!connector->link.force_bpp_x16)
+ continue;
+
+ force_bpp_x16 = min(force_bpp_x16, connector->link.force_bpp_x16);
+ }
+
+ return force_bpp_x16 < INT_MAX ? force_bpp_x16 : 0;
+}
+
/**
* intel_link_bw_init_limits - initialize BW limits
* @state: Atomic state
@@ -31,9 +58,10 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(display, pipe) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- intel_crtc_for_pipe(display, pipe));
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int forced_bpp_x16 = get_forced_link_bpp_x16(state, crtc);
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
@@ -42,15 +70,19 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
+
+ if (forced_bpp_x16)
+ limits->max_bpp_x16[pipe] = min(limits->max_bpp_x16[pipe], forced_bpp_x16);
}
}
/**
- * intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
+ * __intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
* @state: atomic state
* @limits: link BW limits
* @pipe_mask: mask of pipes to select from
* @reason: explanation of why bpp reduction is needed
+ * @reduce_forced_bpp: allow reducing bpps below their forced link bpp
*
* Select the pipe from @pipe_mask with the biggest link bpp value and set the
* maximum of link bpp in @limits below this value. Modeset the selected pipe,
@@ -64,10 +96,11 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
* - %-ENOSPC if no pipe can further reduce its link bpp
* - Other negative error, if modesetting the selected pipe failed
*/
-int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
- struct intel_link_bw_limits *limits,
- u8 pipe_mask,
- const char *reason)
+static int __intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits,
+ u8 pipe_mask,
+ const char *reason,
+ bool reduce_forced_bpp)
{
struct intel_display *display = to_intel_display(state);
enum pipe max_bpp_pipe = INVALID_PIPE;
@@ -97,6 +130,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
*/
link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp);
+ if (!reduce_forced_bpp &&
+ link_bpp_x16 <= get_forced_link_bpp_x16(state, crtc))
+ continue;
+
if (link_bpp_x16 > max_bpp_x16) {
max_bpp_x16 = link_bpp_x16;
max_bpp_pipe = crtc->pipe;
@@ -112,6 +149,21 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
BIT(max_bpp_pipe));
}
+int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits,
+ u8 pipe_mask,
+ const char *reason)
+{
+ int ret;
+
+ /* Try to keep any forced link BPP. */
+ ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, false);
+ if (ret == -ENOSPC)
+ ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, true);
+
+ return ret;
+}
+
/**
* intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
* @state: atomic state
@@ -245,3 +297,176 @@ int intel_link_bw_atomic_check(struct intel_atomic_state *state,
return -EAGAIN;
}
+
+static int force_link_bpp_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+
+ seq_printf(m, FXP_Q4_FMT "\n", FXP_Q4_ARGS(connector->link.force_bpp_x16));
+
+ return 0;
+}
+
+static int str_to_fxp_q4_nonneg_int(const char *str, int *val_x16)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(str, 10, &val);
+ if (err)
+ return err;
+
+ if (val > INT_MAX >> 4)
+ return -ERANGE;
+
+ *val_x16 = fxp_q4_from_int(val);
+
+ return 0;
+}
+
+/* modifies str */
+static int str_to_fxp_q4_nonneg(char *str, int *val_x16)
+{
+ const char *int_str;
+ char *frac_str;
+ int frac_digits;
+ int frac_val;
+ int err;
+
+ int_str = strim(str);
+ frac_str = strchr(int_str, '.');
+
+ if (frac_str)
+ *frac_str++ = '\0';
+
+ err = str_to_fxp_q4_nonneg_int(int_str, val_x16);
+ if (err)
+ return err;
+
+ if (!frac_str)
+ return 0;
+
+ /* reject negative numbers and a leading '+' or '-' sign */
+ if (!isdigit(*frac_str))
+ return -EINVAL;
+
+ err = str_to_fxp_q4_nonneg_int(frac_str, &frac_val);
+ if (err)
+ return err;
+
+ frac_digits = strlen(frac_str);
+ if (frac_digits > intlog10(INT_MAX) >> 24 ||
+ frac_val > INT_MAX - int_pow(10, frac_digits) / 2)
+ return -ERANGE;
+
+ frac_val = DIV_ROUND_CLOSEST(frac_val, (int)int_pow(10, frac_digits));
+
+ if (*val_x16 > INT_MAX - frac_val)
+ return -ERANGE;
+
+ *val_x16 += frac_val;
+
+ return 0;
+}
+
+static int user_str_to_fxp_q4_nonneg(const char __user *ubuf, size_t len, int *val_x16)
+{
+ char *kbuf;
+ int err;
+
+ kbuf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ err = str_to_fxp_q4_nonneg(kbuf, val_x16);
+
+ kfree(kbuf);
+
+ return err;
+}
+
+static bool connector_supports_dsc(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_eDP:
+ return intel_dp_has_dsc(connector);
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return HAS_DSC_MST(display);
+
+ return HAS_DSC(display);
+ default:
+ return false;
+ }
+}
+
+static ssize_t
+force_link_bpp_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
+ int min_bpp;
+ int bpp_x16;
+ int err;
+
+ err = user_str_to_fxp_q4_nonneg(ubuf, len, &bpp_x16);
+ if (err)
+ return err;
+
+ /* TODO: Make the non-DSC min_bpp value connector specific. */
+ if (connector_supports_dsc(connector))
+ min_bpp = intel_dp_dsc_min_src_compressed_bpp();
+ else
+ min_bpp = intel_display_min_pipe_bpp();
+
+ if (bpp_x16 &&
+ (bpp_x16 < fxp_q4_from_int(min_bpp) ||
+ bpp_x16 > fxp_q4_from_int(intel_display_max_pipe_bpp(display))))
+ return -EINVAL;
+
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
+ if (err)
+ return err;
+
+ connector->link.force_bpp_x16 = bpp_x16;
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+
+ *offp += len;
+
+ return len;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_link_bpp);
+
+void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct dentry *root = connector->base.debugfs_entry;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DVID:
+ if (HAS_FDI(display))
+ break;
+
+ return;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (HAS_FDI(display) && !HAS_DDI(display))
+ break;
+
+ return;
+ default:
+ return;
+ }
+
+ debugfs_create_file("intel_force_link_bpp", 0644, root,
+ connector, &force_link_bpp_fops);
+}
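force_link_bpp_write() parses the debugfs input as a non-negative Q4 ("x16") fixed-point value: the integer part is scaled by 16 and the decimal fraction is rounded to the nearest sixteenth. A standalone sketch of the arithmetic, assuming non-negative inputs (kernel helpers replaced with plain C):

	#include <assert.h>

	/* "int_part.frac" -> Q4; pow10 is 10^(number of fraction digits) */
	static int q4_from_parts(int int_part, int frac, int pow10)
	{
		/* DIV_ROUND_CLOSEST() equivalent for non-negative values */
		return int_part * 16 + (frac * 16 + pow10 / 2) / pow10;
	}

	int main(void)
	{
		assert(q4_from_parts(6, 25, 100) == 100); /* "6.25" -> 6.25 in Q4 */
		assert(q4_from_parts(18, 0, 1) == 288);   /* "18"   -> 18.0 in Q4 */
		return 0;
	}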
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index e69049cf178f..b499042e62b1 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -11,6 +11,7 @@
#include "intel_display_limits.h"
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc_state;
struct intel_link_bw_limits {
@@ -32,5 +33,6 @@ bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
enum pipe pipe);
int intel_link_bw_atomic_check(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits);
+void intel_link_bw_connector_debugfs_add(struct intel_connector *connector);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f94b7eeae20f..abc4b562083d 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -29,9 +29,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 8ce7c630da52..7e48a235c99f 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -40,7 +40,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -249,7 +248,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (HAS_PCH_SPLIT(display)) {
assert_fdi_rx_pll_disabled(display, pipe);
- assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
+ assert_dpll_disabled(display, crtc_state->intel_dpll);
} else {
assert_pll_disabled(display, pipe);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 0325b0c9506d..8415f3d703ed 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -23,6 +23,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_fifo_underrun.h"
@@ -92,10 +93,10 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
crtc->active = false;
crtc->base.enabled = false;
- if (crtc_state->shared_dpll)
- intel_unreference_shared_dpll_crtc(crtc,
- crtc_state->shared_dpll,
- &crtc_state->shared_dpll->state);
+ if (crtc_state->intel_dpll)
+ intel_dpll_crtc_put(crtc,
+ crtc_state->intel_dpll,
+ &crtc_state->intel_dpll->state);
}
static void set_encoder_for_connector(struct intel_connector *connector,
@@ -565,7 +566,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
*/
return display->platform.sandybridge &&
crtc_state->hw.active &&
- crtc_state->shared_dpll &&
+ crtc_state->intel_dpll &&
crtc_state->port_clock == 0;
}
@@ -960,7 +961,7 @@ void intel_modeset_setup_hw_state(struct intel_display *display,
drm_crtc_vblank_reset(&crtc->base);
if (crtc_state->hw.active) {
- intel_dmc_enable_pipe(display, crtc->pipe);
+ intel_dmc_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 766a9983665a..f2f6b9d9afa1 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -243,7 +243,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
intel_wm_state_verify(state, crtc);
verify_connector_state(state, crtc);
verify_crtc_state(state, crtc);
- intel_shared_dpll_state_verify(state, crtc);
+ intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
}
@@ -252,5 +252,5 @@ void intel_modeset_verify_disabled(struct intel_atomic_state *state)
{
verify_encoder_state(state);
verify_connector_state(state, NULL);
- intel_shared_dpll_verify_disabled(state);
+ intel_dpll_verify_disabled(state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 0eaa6cd6fe80..81efdb17fc0c 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -31,10 +31,13 @@
#include <acpi/video.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
#include "intel_pci_config.h"
@@ -664,11 +667,10 @@ bool intel_opregion_asle_present(struct intel_display *display)
void intel_opregion_asle_intr(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_opregion *opregion = display->opregion;
if (opregion && opregion->asle)
- queue_work(i915->unordered_wq, &opregion->asle_work);
+ queue_work(display->wq.unordered, &opregion->asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 12308495afa5..159a5f998ea0 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -31,6 +31,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
+
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
@@ -38,6 +39,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index f5c972880391..2a20aaaaac39 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -462,3 +462,135 @@ void intel_panel_fini(struct intel_connector *connector)
drm_mode_destroy(connector->base.dev, fixed_mode);
}
}
+
+/*
+ * If the panel was already enabled at probe, and we took over the state, the
+ * panel prepared state is out of sync, and the panel followers won't be
+ * notified. We need to call drm_panel_prepare() on enabled panels.
+ *
+ * It would be natural to handle this e.g. in the connector ->sync_state hook at
+ * intel_modeset_readout_hw_state(), but that's unfortunately too early. We
+ * don't have drm_connector::kdev at that time. For now, figure out the state at
+ * ->late_register, and sync there.
+ */
+static void intel_panel_sync_state(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
+ if (ret)
+ return;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Panel prepare\n",
+ connector->base.base.id, connector->base.name);
+ intel_panel_prepare(crtc_state, conn_state);
+ }
+ }
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+}
+
+static const struct drm_panel_funcs dummy_panel_funcs = {
+};
+
+int intel_panel_register(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ ret = intel_backlight_device_register(connector);
+ if (ret)
+ return ret;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct device *dev = connector->base.kdev;
+ struct drm_panel *base;
+
+ /* Sanity check. */
+ if (drm_WARN_ON(display->drm, !dev))
+ goto out;
+
+ /*
+ * We need drm_connector::kdev for allocating the panel, to make
+ * drm_panel_add_follower() lookups work. The kdev is
+ * initialized in drm_sysfs_connector_add(), just before the
+ * connector .late_register() hooks. So we can't allocate the
+ * panel at connector init time, and can't allocate struct
+ * intel_panel with a drm_panel sub-struct. For now, use
+ * __devm_drm_panel_alloc() directly.
+ *
+ * The lookups also depend on drm_connector::fwnode being set in
+ * intel_acpi_assign_connector_fwnodes(). However, if that's
+ * missing, it will gracefully lead to -EPROBE_DEFER in
+ * drm_panel_add_follower().
+ */
+ base = __devm_drm_panel_alloc(dev, sizeof(*base), 0,
+ &dummy_panel_funcs,
+ connector->base.connector_type);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err;
+ }
+
+ panel->base = base;
+
+ drm_panel_add(panel->base);
+
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Registered panel device '%s', has fwnode: %s\n",
+ connector->base.base.id, connector->base.name,
+ dev_name(dev), str_yes_no(dev_fwnode(dev)));
+
+ intel_panel_sync_state(connector);
+ }
+
+out:
+ return 0;
+
+err:
+ intel_backlight_device_unregister(connector);
+
+ return ret;
+}
+
+void intel_panel_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->base)
+ drm_panel_remove(panel->base);
+
+ intel_backlight_device_unregister(connector);
+}
+
+/* Notify followers, if any, about power being up. */
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_prepare(panel->base);
+}
+
+/* Notify followers, if any, about power going down. */
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_unprepare(panel->base);
+}
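The dummy drm_panel exists so that other drivers can attach as panel followers; intel_panel_prepare()/intel_panel_unprepare() then fan the panel power state out to them. A hedged sketch of the consumer side using the drm_panel follower API (the callback bodies are placeholders; matching happens via the follower device's fwnode, which is why the comment above calls out -EPROBE_DEFER when drm_connector::fwnode is missing):

	/* Illustrative follower, e.g. a touchscreen driver. */
	static int my_panel_prepared(struct drm_panel_follower *follower)
	{
		/* panel power is up: safe to power on the follower */
		return 0;
	}

	static int my_panel_unpreparing(struct drm_panel_follower *follower)
	{
		/* panel is about to power down: quiesce the follower */
		return 0;
	}

	static const struct drm_panel_follower_funcs my_follower_funcs = {
		.panel_prepared = my_panel_prepared,
		.panel_unpreparing = my_panel_unpreparing,
	};

	static struct drm_panel_follower my_follower = {
		.funcs = &my_follower_funcs,
	};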
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b60d12322e5d..56a6412cf0fb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -23,6 +23,8 @@ void intel_panel_init_alloc(struct intel_connector *connector);
int intel_panel_init(struct intel_connector *connector,
const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
+int intel_panel_register(struct intel_connector *connector);
+void intel_panel_unregister(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
bool intel_panel_use_ssc(struct intel_display *display);
@@ -51,4 +53,8 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
struct intel_encoder *encoder);
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state);
+
#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 1743ebf551cb..3456c794e0e7 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -10,6 +10,7 @@
#include "intel_crt.h"
#include "intel_crt_regs.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_fdi.h"
@@ -251,7 +252,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(display, crtc_state->shared_dpll);
+ assert_dpll_enabled(display, crtc_state->intel_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(display, pipe);
@@ -381,8 +382,8 @@ void ilk_pch_enable(struct intel_atomic_state *state,
temp = intel_de_read(display, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (crtc_state->shared_dpll ==
- intel_get_shared_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
+ if (crtc_state->intel_dpll ==
+ intel_get_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
@@ -394,11 +395,11 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already use the clock when we share it.
*
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that
+ * Note that dpll_enable tries to do the right thing, but
+ * get_dpll unconditionally resets the pll - we need that
* to have the right LVDS enable sequence.
*/
- intel_enable_shared_dpll(crtc_state);
+ intel_dpll_enable(crtc_state);
/* set transcoder timing, panel must allow it */
assert_pps_unlocked(display, pipe);
@@ -472,7 +473,7 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
ilk_fdi_pll_disable(crtc);
- intel_disable_shared_dpll(old_crtc_state);
+ intel_dpll_disable(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -496,7 +497,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
bool pll_active;
@@ -528,8 +529,8 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_PCH_PLL_A;
}
- crtc_state->shared_dpll = intel_get_shared_dpll_by_id(display, pll_id);
- pll = crtc_state->shared_dpll;
+ crtc_state->intel_dpll = intel_get_dpll_by_id(display, pll_id);
+ pll = crtc_state->intel_dpll;
pll_active = intel_dpll_get_hw_state(display, pll,
&crtc_state->dpll_hw_state);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 693b90e3dfc3..d3c5255bf1a8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -3,13 +3,17 @@
* Copyright © 2021 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
+#include "intel_sbi_regs.h"
static void lpt_fdi_reset_mphy(struct intel_display *display)
{
@@ -29,95 +33,93 @@ static void lpt_fdi_reset_mphy(struct intel_display *display)
/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
lpt_fdi_reset_mphy(display);
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x8008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2008, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2108, SBI_MPHY);
tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2108, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x206C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x216C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x216C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2180, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2180, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x208C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x218C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x218C, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2098, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2098, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x2198, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x2198, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x20C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x21C4, SBI_MPHY);
tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x21C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x20EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x20EC, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp = intel_sbi_read(display, 0x21EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ intel_sbi_write(display, 0x21EC, tmp, SBI_MPHY);
}
void lpt_disable_iclkip(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 temp;
intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL6, temp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
struct iclkip_params {
@@ -178,8 +180,6 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
@@ -199,30 +199,30 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
- intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL6, temp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
/* Wait for initialization time */
udelay(24);
@@ -232,7 +232,6 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
int lpt_get_iclkip(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct iclkip_params p;
u32 temp;
@@ -241,25 +240,25 @@ int lpt_get_iclkip(struct intel_display *display)
iclkip_params_init(&p);
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
return 0;
}
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCDIVINTPHASE6, SBI_ICLK);
p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp = intel_sbi_read(display, SBI_SSCAUXDIV6, SBI_ICLK);
p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
@@ -275,7 +274,6 @@ int lpt_get_iclkip(struct intel_display *display)
static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
if (drm_WARN(display->drm, with_fdi && !with_spread,
@@ -285,57 +283,56 @@ static void lpt_enable_clkout_dp(struct intel_display *display,
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(24);
if (with_spread) {
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
lpt_fdi_program_mphy(display);
}
reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp = intel_sbi_read(display, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+ intel_sbi_write(display, reg, tmp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp = intel_sbi_read(display, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+ intel_sbi_write(display, reg, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCCTL, SBI_ICLK);
if (!(tmp & SBI_SSCCTL_DISABLE)) {
if (!(tmp & SBI_SSCCTL_PATHALT)) {
tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(32);
}
tmp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCCTL, tmp, SBI_ICLK);
}
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -372,7 +369,6 @@ static const u16 sscdivintphase[] = {
*/
static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
int idx = BEND_IDX(steps);
@@ -382,20 +378,20 @@ static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
- intel_sbi_lock(dev_priv);
+ intel_sbi_lock(display);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
else
tmp = 0x00000000;
- intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+ tmp = intel_sbi_read(display, SBI_SSCDIVINTPHASE, SBI_ICLK);
tmp &= 0xffff0000;
tmp |= sscdivintphase[idx];
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+ intel_sbi_write(display, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- intel_sbi_unlock(dev_priv);
+ intel_sbi_unlock(display);
}
#undef BEND_IDX
@@ -499,7 +495,7 @@ static void lpt_init_pch_refclk(struct intel_display *display)
static void ilk_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
- struct intel_shared_dpll *pll;
+ struct intel_dpll *pll;
int i;
u32 val, final;
bool has_lvds = false;
@@ -535,7 +531,7 @@ static void ilk_init_pch_refclk(struct intel_display *display)
}
/* Check if any DPLLs are using the SSC source */
- for_each_shared_dpll(display, pll, i) {
+ for_each_dpll(display, pll, i) {
u32 temp;
temp = intel_de_read(display, PCH_DPLL(pll->info->id));
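The sideband (SBI) accesses converted above are read-modify-write sequences that must stay serialized under intel_sbi_lock(); the patch only changes the handle from dev_priv to display. A condensed sketch of the recurring pattern (helper name is hypothetical):

	static void lpt_sbi_rmw(struct intel_display *display, u16 reg,
				u32 clear, u32 set)
	{
		u32 tmp;

		intel_sbi_lock(display);

		tmp = intel_sbi_read(display, reg, SBI_ICLK);
		tmp &= ~clear;
		tmp |= set;
		intel_sbi_write(display, reg, tmp, SBI_ICLK);

		intel_sbi_unlock(display);
	}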
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 3c3ecf288570..13541be4d6df 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 6182f484b5bd..c2b4b2254190 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,10 +30,10 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"
#include "intel_pipe_crc_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 1bcfa5f4fd63..36fb07471deb 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -33,20 +33,22 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-resv.h>
+#include <linux/iosys-map.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_cache.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_panic.h>
#include "gem/i915_gem_object.h"
-#include "i915_config.h"
#include "i915_scheduler_types.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_display_rps.h"
@@ -54,6 +56,9 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
+#include "intel_plane.h"
+#include "intel_psr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -334,7 +339,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
* display blinking due to constant cdclk changes.
*/
if (new_crtc_state->min_cdclk[plane->id] <=
- cdclk_state->min_cdclk[crtc->pipe])
+ intel_cdclk_min_cdclk(cdclk_state, crtc->pipe))
return 0;
drm_dbg_kms(display->drm,
@@ -342,7 +347,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
plane->base.base.id, plane->base.name,
new_crtc_state->min_cdclk[plane->id],
crtc->base.base.id, crtc->base.name,
- cdclk_state->min_cdclk[crtc->pipe]);
+ intel_cdclk_min_cdclk(cdclk_state, crtc->pipe));
*need_cdclk_calc = true;
return 0;
@@ -735,8 +740,8 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
return NULL;
}
-int intel_plane_atomic_check(struct intel_atomic_state *state,
- struct intel_plane *plane)
+static int plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state);
struct intel_plane_state *new_plane_state =
@@ -984,10 +989,10 @@ void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
i9xx_crtc_planes_update_arm(dsb, state, crtc);
}
-int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
- struct intel_crtc_state *crtc_state,
- int min_scale, int max_scale,
- bool can_position)
+int intel_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -1086,7 +1091,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
/* Wa_16023981245 */
if ((DISPLAY_VERx100(display) == 2000 ||
- DISPLAY_VERx100(display) == 3000) &&
+ DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 3002) &&
src_x % 2 != 0)
hsub = 2;
} else {
@@ -1267,14 +1273,176 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
intel_plane_unpin_fb(old_plane_state);
}
+/* Handle Y-tiling, but only if DPT is enabled (otherwise disabling tiling is easier).
+ * All DPT hardware uses a 128-byte tile width, so the Y-tile dimension is
+ * 32x32 pixels for 32-bit pixels.
+ */
+#define YTILE_WIDTH 32
+#define YTILE_HEIGHT 32
+#define YTILE_SIZE (YTILE_WIDTH * YTILE_HEIGHT * 4)
+
+static unsigned int intel_ytile_get_offset(unsigned int width, unsigned int x, unsigned int y)
+{
+ u32 offset;
+ unsigned int swizzle;
+ unsigned int width_in_blocks = DIV_ROUND_UP(width, 32);
+
+ /* Block offset */
+ offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE;
+
+ x = x % YTILE_WIDTH;
+ y = y % YTILE_HEIGHT;
+
+ /* bit order inside a block is x4 x3 x2 y4 y3 y2 y1 y0 x1 x0 */
+ swizzle = (x & 3) | ((y & 0x1f) << 2) | ((x & 0x1c) << 5);
+ offset += swizzle * 4;
+ return offset;
+}
+
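The swizzle packs the low tile coordinates into the dword index in the bit order given in the comment. A quick standalone check of that packing (values worked out by hand):

	#include <assert.h>

	/* bit order: x4 x3 x2 y4 y3 y2 y1 y0 x1 x0, as in intel_ytile_get_offset() */
	static unsigned int ytile_swizzle(unsigned int x, unsigned int y)
	{
		return (x & 3) | ((y & 0x1f) << 2) | ((x & 0x1c) << 5);
	}

	int main(void)
	{
		assert(ytile_swizzle(5, 3) == 141); /* 1 | (3 << 2) | (1 << 7) */
		/* y0 sits at bit 2 of the dword index, so y + 1 adds 4 */
		assert(ytile_swizzle(5, 4) - ytile_swizzle(5, 3) == 4);
		return 0;
	}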
+static unsigned int intel_4tile_get_offset(unsigned int width, unsigned int x, unsigned int y)
+{
+ u32 offset;
+ unsigned int swizzle;
+ unsigned int width_in_blocks = DIV_ROUND_UP(width, 32);
+
+ /* Block offset */
+ offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE;
+
+ x = x % YTILE_WIDTH;
+ y = y % YTILE_HEIGHT;
+
+ /* bit order inside a block is y4 y3 x4 y2 x3 x2 y1 y0 x1 x0 */
+ swizzle = (x & 3) | ((y & 3) << 2) | ((x & 0xc) << 2) | (y & 4) << 4 |
+ ((x & 0x10) << 3) | ((y & 0x18) << 5);
+ offset += swizzle * 4;
+ return offset;
+}
+
+static void intel_panic_flush(struct drm_plane *plane)
+{
+ struct intel_plane_state *plane_state = to_intel_plane_state(plane->state);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(plane->state->crtc->state);
+ struct intel_plane *iplane = to_intel_plane(plane);
+ struct intel_display *display = to_intel_display(iplane);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ intel_bo_panic_finish(intel_fb);
+
+ if (crtc_state->enable_psr2_sel_fetch) {
+ /* Force a full update for psr2 */
+ intel_psr2_panic_force_full_update(display, crtc_state);
+ }
+
+ /* Flush the cache and don't disable tiling if it's the fbdev framebuffer. */
+ if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ struct iosys_map map;
+
+ intel_fbdev_get_map(display->fbdev.fbdev, &map);
+ drm_clflush_virt_range(map.vaddr, fb->pitches[0] * fb->height);
+ return;
+ }
+
+ if (fb->modifier && iplane->disable_tiling)
+ iplane->disable_tiling(iplane);
+}
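In the fbdev path the panic screen has already been drawn through the CPU mapping of the frame buffer, so a CPU cache flush via drm_clflush_virt_range() is all that is needed for the writes to reach the memory the display engine scans out; tiling is left untouched there.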
+
+static unsigned int (*intel_get_tiling_func(u64 fb_modifier))(unsigned int width,
+ unsigned int x,
+ unsigned int y)
+{
+ switch (fb_modifier) {
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return intel_ytile_get_offset;
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return intel_4tile_get_offset;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ default:
+ /* Not supported yet */
+ return NULL;
+ }
+}
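To illustrate what the returned function pointer is for (the names below are hypothetical, not from the patch): a drawing routine can translate an (x, y) pixel coordinate into a byte offset in the tiled buffer, falling back to plain linear addressing when no tiling function is needed:

/* sketch: plot one 32bpp pixel into a (possibly tiled) panic buffer;
 * the linear fallback assumes pitch == width * 4
 */
static void panic_set_pixel(void *vaddr, unsigned int fb_width,
			    unsigned int (*tiling)(unsigned int width,
						   unsigned int x,
						   unsigned int y),
			    unsigned int x, unsigned int y, u32 color)
{
	unsigned int offset = tiling ? tiling(fb_width, x, y)
				     : (y * fb_width + x) * 4;

	*(u32 *)((u8 *)vaddr + offset) = color;
}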
+
+static int intel_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct intel_plane_state *plane_state;
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct intel_display *display = to_intel_display(plane->dev);
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ plane_state = to_intel_plane_state(plane->state);
+ fb = plane_state->hw.fb;
+ intel_fb = to_intel_framebuffer(fb);
+
+ obj = intel_fb_bo(fb);
+ if (!obj)
+ return -ENODEV;
+
+ if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ intel_fbdev_get_map(display->fbdev.fbdev, &sb->map[0]);
+ } else {
+ int ret;
+ /* Can't disable tiling if DPT is in use */
+ if (intel_fb_uses_dpt(fb)) {
+ if (fb->format->cpp[0] != 4)
+ return -EOPNOTSUPP;
+ intel_fb->panic_tiling = intel_get_tiling_func(fb->modifier);
+ if (!intel_fb->panic_tiling)
+ return -EOPNOTSUPP;
+ }
+ sb->private = intel_fb;
+ ret = intel_bo_panic_setup(sb);
+ if (ret)
+ return ret;
+ }
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format: tiling, RC, CCS and CC will all be
+ * disabled in disable_tiling().
+ */
+ sb->format = drm_format_info(fb->format->format);
+ sb->pitch[0] = fb->pitches[0];
+
+ return 0;
+}
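For context, the DRM panic core drives these two hooks roughly as follows on a kernel panic (heavily simplified sketch; draw_panic_screen() is a stand-in for the real drawing code):

	struct drm_scanout_buffer sb = { };

	if (plane->helper_private->get_scanout_buffer(plane, &sb) == 0) {
		draw_panic_screen(&sb);		/* writes via sb.map / sb.pitch[0] */
		if (plane->helper_private->panic_flush)
			plane->helper_private->panic_flush(plane);
	}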
+
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
};
+static const struct drm_plane_helper_funcs intel_primary_plane_helper_funcs = {
+ .prepare_fb = intel_prepare_plane_fb,
+ .cleanup_fb = intel_cleanup_plane_fb,
+ .get_scanout_buffer = intel_get_scanout_buffer,
+ .panic_flush = intel_panic_flush,
+};
+
void intel_plane_helper_add(struct intel_plane *plane)
{
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&plane->base, &intel_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
@@ -1434,8 +1602,8 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_plane_add_affected(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1529,7 +1697,7 @@ static int intel_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
-int intel_atomic_check_planes(struct intel_atomic_state *state)
+int intel_plane_atomic_check(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1543,7 +1711,7 @@ int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_atomic_check(state, plane);
+ ret = plane_atomic_check(state, plane);
if (ret) {
drm_dbg_atomic(display->drm,
"[PLANE:%d:%s] atomic driver check failed\n",
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 317320c32285..4ef012c08fa4 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -3,8 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
-#ifndef __INTEL_ATOMIC_PLANE_H__
-#define __INTEL_ATOMIC_PLANE_H__
+#ifndef __INTEL_PLANE_H__
+#define __INTEL_PLANE_H__
#include <linux/types.h>
@@ -69,15 +69,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
-int intel_plane_atomic_check(struct intel_atomic_state *state,
- struct intel_plane *plane);
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
-int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
- struct intel_crtc_state *crtc_state,
- int min_scale, int max_scale,
- bool can_position);
+int intel_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -85,13 +83,13 @@ void intel_plane_helper_add(struct intel_plane *plane);
bool intel_plane_needs_physical(struct intel_plane *plane);
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state);
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_atomic_check_planes(struct intel_atomic_state *state);
+int intel_plane_add_affected(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check(struct intel_atomic_state *state);
u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
u32 format,
u64 modifier);
-#endif /* __INTEL_ATOMIC_PLANE_H__ */
+#endif /* __INTEL_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index c00d9184c586..4246173ed311 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -6,12 +6,13 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
@@ -288,7 +289,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- intel_bo_to_drm_bo(vma->obj), &mode_cmd)) {
+ intel_bo_to_drm_bo(vma->obj),
+ fb->format, &mode_cmd)) {
drm_dbg_kms(display->drm, "intel fb init failed\n");
goto err_vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 1253376c7654..d806c15db7ce 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -7,12 +7,12 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
@@ -294,40 +294,17 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *new_bw_state, *old_bw_state;
- const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
struct intel_crtc *crtc;
int i;
- new_bw_state = intel_atomic_get_new_bw_state(state);
- old_bw_state = intel_atomic_get_old_bw_state(state);
- if (new_bw_state && new_bw_state->qgv_point_peakbw !=
- old_bw_state->qgv_point_peakbw)
+ if (intel_bw_pmdemand_needs_update(state))
return true;
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (new_dbuf_state &&
- new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
+ if (intel_dbuf_pmdemand_needs_update(state))
return true;
- if (DISPLAY_VER(display) < 30) {
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices !=
- old_dbuf_state->enabled_slices)
- return true;
- }
-
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- if (new_cdclk_state &&
- (new_cdclk_state->actual.cdclk !=
- old_cdclk_state->actual.cdclk ||
- new_cdclk_state->actual.voltage_level !=
- old_cdclk_state->actual.voltage_level))
+ if (intel_cdclk_pmdemand_needs_update(state))
return true;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -362,7 +339,7 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
/* firmware will calculate the qclk_gv_index, requirement is set to 0 */
new_pmdemand_state->params.qclk_gv_index = 0;
- new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;
+ new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state);
new_dbuf_state = intel_atomic_get_dbuf_state(state);
if (IS_ERR(new_dbuf_state))
@@ -370,12 +347,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
if (DISPLAY_VER(display) < 30) {
new_pmdemand_state->params.active_dbufs =
- min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+ min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3);
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
+ min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3);
} else {
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
+ min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display));
}
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -383,9 +360,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
return PTR_ERR(new_cdclk_state);
new_pmdemand_state->params.voltage_index =
- new_cdclk_state->actual.voltage_level;
+ intel_cdclk_actual_voltage_level(new_cdclk_state);
new_pmdemand_state->params.cdclk_freq_mhz =
- DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
+ DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000);
intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 05e1e5c7e8b7..b64d0b30f5b1 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -5,11 +5,14 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -891,7 +894,6 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long delay;
/*
@@ -907,7 +909,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
* operations.
*/
delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
- queue_delayed_work(i915->unordered_wq,
+ queue_delayed_work(display->wq.unordered,
&intel_dp->pps.panel_vdd_work, delay);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pps_regs.h b/drivers/gpu/drm/i915/display/intel_pps_regs.h
index 8f9dbfab9523..2f014d929d32 100644
--- a/drivers/gpu/drm/i915/display/intel_pps_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_pps_regs.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_PPS_REGS_H__
#define __INTEL_PPS_REGS_H__
-#include "intel_display_conversion.h"
#include "intel_display_reg_defs.h"
/* Panel power sequencing */
@@ -14,11 +13,11 @@
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
#define PCH_PPS_BASE 0xC7200
-#define _MMIO_PPS(dev_priv, pps_idx, reg) \
- _MMIO(__to_intel_display(dev_priv)->pps.mmio_base - PPS_BASE + (reg) + (pps_idx) * 0x100)
+#define _MMIO_PPS(display, pps_idx, reg) \
+ _MMIO((display)->pps.mmio_base - PPS_BASE + (reg) + (pps_idx) * 0x100)
#define _PP_STATUS 0x61200
-#define PP_STATUS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_STATUS)
+#define PP_STATUS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_STATUS)
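As a worked example of the indexing (PPS_BASE is 0x61200 in this header): with pps.mmio_base == PCH_PPS_BASE, PP_STATUS(display, 1) resolves to _MMIO(0xC7200 - 0x61200 + 0x61200 + 1 * 0x100) = _MMIO(0xC7300).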
#define PP_ON REG_BIT(31)
/*
* Indicates that all dependencies of the panel are on:
@@ -45,7 +44,7 @@
#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
-#define PP_CONTROL(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_CONTROL)
+#define PP_CONTROL(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_CONTROL)
#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
@@ -55,7 +54,7 @@
#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
-#define PP_ON_DELAYS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_ON_DELAYS)
+#define PP_ON_DELAYS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_ON_DELAYS)
#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
@@ -66,12 +65,12 @@
#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
-#define PP_OFF_DELAYS(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_OFF_DELAYS)
+#define PP_OFF_DELAYS(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_OFF_DELAYS)
#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
-#define PP_DIVISOR(dev_priv, pps_idx) _MMIO_PPS(dev_priv, pps_idx, _PP_DIVISOR)
+#define PP_DIVISOR(display, pps_idx) _MMIO_PPS((display), (pps_idx), _PP_DIVISOR)
#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 430ad4ef7146..ae9053919211 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,7 +28,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
@@ -37,6 +36,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -47,6 +47,7 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
+#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -447,7 +448,6 @@ static void psr_event_print(struct intel_display *display,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
@@ -492,7 +492,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
0, psr_irq_psr_error_bit_get(intel_dp));
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ queue_work(display->wq.unordered, &intel_dp->psr.work);
}
}
@@ -516,7 +516,7 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
if (intel_dp->psr.sink_panel_replay_su_support)
drm_dp_dpcd_readb(&intel_dp->aux,
- DP_PANEL_PANEL_REPLAY_CAPABILITY,
+ DP_PANEL_REPLAY_CAP_CAPABILITY,
&su_capability);
else
su_capability = intel_dp->psr_dpcd[1];
@@ -528,7 +528,7 @@ static unsigned int
intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
- DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
+ DP_PANEL_REPLAY_CAP_X_GRANULARITY :
DP_PSR2_SU_X_GRANULARITY;
}
@@ -536,7 +536,7 @@ static unsigned int
intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
- DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
+ DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
DP_PSR2_SU_Y_GRANULARITY;
}
@@ -608,7 +608,8 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
return;
}
- if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
@@ -617,7 +618,8 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_panel_replay_support = true;
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
drm_dbg_kms(display->drm,
@@ -676,10 +678,12 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
- &intel_dp->pr_dpcd);
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SUPPORT)
_panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr_dpcd[0])
@@ -736,7 +740,8 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return false;
return panel_replay ?
- intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
+ intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
psr2_su_region_et_global_enabled(intel_dp);
}
@@ -1574,6 +1579,12 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (!CAN_PSR(intel_dp))
return false;
+ /*
+ * Currently PSR doesn't work reliably with VRR enabled.
+ */
+ if (crtc_state->vrr.enable)
+ return false;
+
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
if (entry_setup_frames >= 0) {
@@ -1691,12 +1702,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- /*
- * Currently PSR/PR doesn't work reliably with VRR enabled.
- */
- if (crtc_state->vrr.enable)
- return;
-
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -2883,6 +2888,26 @@ skip_sel_fetch_set_loop:
return 0;
}
+void intel_psr2_panic_force_full_update(struct intel_display *display,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = man_trk_ctl_enable_bit_get(display);
+
+ /* SF partial frame enable has to be set even on full update */
+ val |= man_trk_ctl_partial_frame_bit_get(display);
+ val |= man_trk_ctl_continuos_full_frame(display);
+
+ /* Write the register directly: this runs from the panic path, bypassing the DSB */
+ intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
+}
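Arming continuous full frame here makes selective fetch transfer whole frames until PSR is reprogrammed, which is what guarantees the panic output is not clipped to a stale selective-update region.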
+
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -3314,7 +3339,6 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
@@ -3329,14 +3353,13 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
return;
tgl_psr2_enable_dc3co(intel_dp);
- mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
+ mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
}
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
@@ -3361,7 +3384,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
!intel_dp->psr.busy_frontbuffer_bits)
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ queue_work(display->wq.unordered, &intel_dp->psr.work);
}
/**
@@ -3916,7 +3939,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp,
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
- if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
+ if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
seq_printf(m, "\n");
}
@@ -4021,24 +4045,30 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
int frame;
/*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
+ * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
+ * (it returns zeros only) and it has been removed on Xe2_LPD.
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(display,
- PSR2_SU_STATUS(display, cpu_transcoder, frame));
- su_frames_val[frame / 3] = val;
- }
+ if (DISPLAY_VER(display) < 13) {
+ /*
+ * Read all 3 registers beforehand to minimize the chance of
+ * crossing a frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(display,
+ PSR2_SU_STATUS(display, cpu_transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
}
seq_printf(m, "PSR2 selective fetch: %s\n",
@@ -4234,3 +4264,9 @@ bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_sta
return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
crtc_state->has_panel_replay);
}
+
+bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 73c3fa40844b..9b061a22361f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -57,6 +57,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
+void intel_psr2_panic_force_full_update(struct intel_display *display,
+ struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);
@@ -77,5 +79,7 @@ int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 248136456048..8afbf5a38335 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -266,6 +266,16 @@
#define _PIPE_SRCSZ_ERLY_TPT_B 0x71074
#define PIPE_SRCSZ_ERLY_TPT(pipe) _MMIO_PIPE((pipe), _PIPE_SRCSZ_ERLY_TPT_A, _PIPE_SRCSZ_ERLY_TPT_B)
+#define _PR_ALPM_CTL_A 0x60948
+#define PR_ALPM_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PR_ALPM_CTL_A)
+#define PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU BIT(6)
+#define PR_ALPM_CTL_RFB_UPDATE_CONTROL BIT(5)
+#define PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE BIT(4)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK REG_GENMASK(1, 0)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1_OR_T2 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 0)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 1)
+#define PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T2 REG_FIELD_PREP(PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_MASK, 2)
+
#define _ALPM_CTL_A 0x60950
#define ALPM_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.c b/drivers/gpu/drm/i915/display/intel_sbi.c
new file mode 100644
index 000000000000..dfcff924f0ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ *
+ * LPT/WPT IOSF sideband.
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_de.h"
+#include "intel_display_core.h"
+#include "intel_sbi.h"
+#include "intel_sbi_regs.h"
+
+/* SBI access */
+static int intel_sbi_rw(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
+{
+ u32 cmd;
+
+ lockdep_assert_held(&display->sbi.lock);
+
+ if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
+ drm_err(display->drm, "timeout waiting for SBI to become ready\n");
+ return -EBUSY;
+ }
+
+ intel_de_write_fw(display, SBI_ADDR, SBI_ADDR_VALUE(reg));
+ intel_de_write_fw(display, SBI_DATA, is_read ? 0 : *val);
+
+ if (destination == SBI_ICLK)
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ if (!is_read)
+ cmd |= SBI_CTL_OP_WR;
+ intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY);
+
+ if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
+ drm_err(display->drm, "timeout waiting for SBI to complete read\n");
+ return -ETIMEDOUT;
+ }
+
+ if (cmd & SBI_RESPONSE_FAIL) {
+ drm_err(display->drm, "error during SBI read of reg %x\n", reg);
+ return -ENXIO;
+ }
+
+ if (is_read)
+ *val = intel_de_read_fw(display, SBI_DATA);
+
+ return 0;
+}
+
+void intel_sbi_lock(struct intel_display *display)
+{
+ mutex_lock(&display->sbi.lock);
+}
+
+void intel_sbi_unlock(struct intel_display *display)
+{
+ mutex_unlock(&display->sbi.lock);
+}
+
+u32 intel_sbi_read(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(display, reg, destination, &result, true);
+
+ return result;
+}
+
+void intel_sbi_write(struct intel_display *display, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ intel_sbi_rw(display, reg, destination, &value, false);
+}
+
+void intel_sbi_init(struct intel_display *display)
+{
+ mutex_init(&display->sbi.lock);
+}
+
+void intel_sbi_fini(struct intel_display *display)
+{
+ mutex_destroy(&display->sbi.lock);
+}
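For reference, callers are expected to hold the SBI lock across a read-modify-write sequence. A sketch of a typical caller, mirroring how the iCLK spread-spectrum control is toggled elsewhere in the driver (the function name is illustrative):

static void example_disable_iclk_ssc(struct intel_display *display)
{
	u32 tmp;

	intel_sbi_lock(display);
	tmp = intel_sbi_read(display, SBI_SSCCTL6, SBI_ICLK);
	tmp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(display, SBI_SSCCTL6, tmp, SBI_ICLK);
	intel_sbi_unlock(display);
}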
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.h b/drivers/gpu/drm/i915/display/intel_sbi.h
new file mode 100644
index 000000000000..841f77a142a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SBI_H_
+#define _INTEL_SBI_H_
+
+#include <linux/types.h>
+
+struct intel_display;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+void intel_sbi_init(struct intel_display *display);
+void intel_sbi_fini(struct intel_display *display);
+void intel_sbi_lock(struct intel_display *display);
+void intel_sbi_unlock(struct intel_display *display);
+u32 intel_sbi_read(struct intel_display *display, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct intel_display *display, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_sbi_regs.h b/drivers/gpu/drm/i915/display/intel_sbi_regs.h
new file mode 100644
index 000000000000..ec76652de02d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sbi_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_SBI_REGS_H__
+#define __INTEL_SBI_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/*
+ * The Sideband Interface (SBI) is programmed indirectly via SBI_ADDR, which
+ * contains the register offset, and SBI_DATA, which contains the payload.
+ */
+#define SBI_ADDR _MMIO(0xC6000)
+#define SBI_ADDR_MASK REG_GENMASK(31, 16)
+#define SBI_ADDR_VALUE(addr) REG_FIELD_PREP(SBI_ADDR_MASK, (addr))
+
+#define SBI_DATA _MMIO(0xC6004)
+
+#define SBI_CTL_STAT _MMIO(0xC6008)
+#define SBI_CTL_DEST_MASK REG_GENMASK(16, 16)
+#define SBI_CTL_DEST_ICLK REG_FIELD_PREP(SBI_CTL_DEST_MASK, 0)
+#define SBI_CTL_DEST_MPHY REG_FIELD_PREP(SBI_CTL_DEST_MASK, 1)
+#define SBI_CTL_OP_MASK REG_GENMASK(15, 8)
+#define SBI_CTL_OP_IORD REG_FIELD_PREP(SBI_CTL_OP_MASK, 2)
+#define SBI_CTL_OP_IOWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 3)
+#define SBI_CTL_OP_CRRD REG_FIELD_PREP(SBI_CTL_OP_MASK, 6)
+#define SBI_CTL_OP_CRWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 7)
+#define SBI_CTL_OP_WR REG_BIT(8)
+#define SBI_RESPONSE_MASK REG_GENMASK(2, 1)
+#define SBI_RESPONSE_FAIL REG_FIELD_PREP(SBI_RESPONSE_MASK, 1)
+#define SBI_RESPONSE_SUCCESS REG_FIELD_PREP(SBI_RESPONSE_MASK, 0)
+#define SBI_STATUS_MASK REG_GENMASK(0, 0)
+#define SBI_STATUS_BUSY REG_FIELD_PREP(SBI_STATUS_MASK, 1)
+#define SBI_STATUS_READY REG_FIELD_PREP(SBI_STATUS_MASK, 0)
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE 0x0200
+
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
+#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
+
+#define SBI_SSCDITHPHASE 0x0204
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1 << 3)
+#define SBI_SSCCTL_DISABLE (1 << 0)
+
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
+
+#define SBI_DBUFF0 0x2a00
+
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
+
+#endif /* __INTEL_SBI_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 8a38df2c0283..87aff2754f69 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,13 +39,13 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index 5111bdc3075b..7fe6b4a18213 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -106,7 +106,7 @@ static void get_ana_cp_int_prop(u64 vco_clk,
DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
CURVE2_MULTIPLIER);
- *ana_cp_int = max(1, min(ana_cp_int_temp, 127));
+ *ana_cp_int = clamp(ana_cp_int_temp, 1, 127);
curve_2_scaled_int = curve_2_scaled1 * (*ana_cp_int);
@@ -125,7 +125,7 @@ static void get_ana_cp_int_prop(u64 vco_clk,
curve_1_interpolated);
*ana_cp_prop = DIV64_U64_ROUND_UP(adjusted_vco_clk2, curve_2_scaled2);
- *ana_cp_prop = max(1, min(*ana_cp_prop, 127));
+ *ana_cp_prop = clamp(*ana_cp_prop, 1, 127);
}
static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 2b53ac9f4935..b2dd69a11124 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -7,11 +7,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index fd92e6b89b43..e6844df837af 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -41,11 +41,11 @@
#include "i915_utils.h"
#include "i9xx_plane.h"
-#include "intel_atomic_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_sprite.h"
#include "intel_sprite_regs.h"
@@ -1366,8 +1366,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
}
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
if (ret)
return ret;
@@ -1421,10 +1421,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index c1014e74791f..3bc57579fe53 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -14,6 +14,7 @@
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_power_map.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index acf0b3733908..e3ab49815a3c 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -36,12 +36,12 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 139fa5deba80..70ba7aa26bf4 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -6,10 +6,10 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index e9b809568cd4..92c04811aa28 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -446,7 +446,7 @@ enum vbt_gmbus_ddi {
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
- * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * When we copy the child device configs to display->vbt.child_dev, we
* reserve space for the full structure below, and initialize the tail not
* actually present in VBT to zeros. Accessing those fields is fine, as long as
* the default zero is taken into account, again according to the BDB version.
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 05d140c8032d..6e125564db34 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -13,7 +13,6 @@
#include "soc/intel_gmch.h"
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_vga.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index c6565baf815a..3eed37f271b0 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -6,8 +6,8 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_vrr.h"
@@ -576,6 +576,25 @@ bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
return false;
}
+static
+void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * On BMG and from LNL onwards, EMP_AS_SDP_TL is used to program the
+ * double buffering point and the transmission line for VRR packets for
+ * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
+ * Since we currently support VRR only for DP/eDP, this is programmed
+ * for the Adaptive Sync SDP, with the transmission line at Vsync start.
+ */
+ if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
+ intel_de_write(display,
+ EMP_AS_SDP_TL(display, cpu_transcoder),
+ EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
+}
+
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -595,6 +614,8 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
TRANS_PUSH_EN);
if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_vrr_set_db_point_and_transmission_line(crtc_state);
+
if (crtc_state->cmrr.enable) {
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
@@ -646,6 +667,8 @@ void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
+ intel_vrr_set_db_point_and_transmission_line(crtc_state);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr_regs.h b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
index 6ed0e0dc97e7..ba9b9215dc11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
@@ -8,120 +8,119 @@
#include "intel_display_reg_defs.h"
-/* VRR registers */
#define _TRANS_VRR_CTL_A 0x60420
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
-#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
-#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
-#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
+#define TRANS_VRR_CTL(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_CMRR_ENABLE REG_BIT(27)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
#define _TRANS_VRR_VMAX_C 0x62424
#define _TRANS_VRR_VMAX_D 0x63424
-#define TRANS_VRR_VMAX(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A)
-#define VRR_VMAX_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_VMAX(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAX_A)
+#define VRR_VMAX_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_VMIN_A 0x60434
#define _TRANS_VRR_VMIN_B 0x61434
#define _TRANS_VRR_VMIN_C 0x62434
#define _TRANS_VRR_VMIN_D 0x63434
-#define TRANS_VRR_VMIN(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A)
-#define VRR_VMIN_MASK REG_GENMASK(15, 0)
+#define TRANS_VRR_VMIN(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMIN_A)
+#define VRR_VMIN_MASK REG_GENMASK(15, 0)
#define _TRANS_VRR_VMAXSHIFT_A 0x60428
#define _TRANS_VRR_VMAXSHIFT_B 0x61428
#define _TRANS_VRR_VMAXSHIFT_C 0x62428
#define _TRANS_VRR_VMAXSHIFT_D 0x63428
-#define TRANS_VRR_VMAXSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_VMAXSHIFT_A)
-#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
-#define VRR_VMAXSHIFT_DEC REG_BIT(16)
-#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
+#define TRANS_VRR_VMAXSHIFT(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAXSHIFT_A)
+#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
+#define VRR_VMAXSHIFT_DEC REG_BIT(16)
+#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
#define _TRANS_VRR_STATUS_A 0x6042c
#define _TRANS_VRR_STATUS_B 0x6142c
#define _TRANS_VRR_STATUS_C 0x6242c
#define _TRANS_VRR_STATUS_D 0x6342c
-#define TRANS_VRR_STATUS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A)
-#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
-#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
-#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
-#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
-#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
-#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
-#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
-#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
-#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
-#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
-#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
-#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
-#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
-#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+#define TRANS_VRR_STATUS(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS_A)
+#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
+#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
+#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
+#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
+#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
+#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
+#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
+#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
#define _TRANS_VRR_VTOTAL_PREV_A 0x60480
#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
-#define TRANS_VRR_VTOTAL_PREV(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_VTOTAL_PREV_A)
-#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
-#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
-#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
-#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_VTOTAL_PREV(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VTOTAL_PREV_A)
+#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
+#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
+#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_FLIPLINE_A 0x60438
#define _TRANS_VRR_FLIPLINE_B 0x61438
#define _TRANS_VRR_FLIPLINE_C 0x62438
#define _TRANS_VRR_FLIPLINE_D 0x63438
-#define TRANS_VRR_FLIPLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \
- _TRANS_VRR_FLIPLINE_A)
-#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_FLIPLINE(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_FLIPLINE_A)
+#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_STATUS2_A 0x6043c
#define _TRANS_VRR_STATUS2_B 0x6143c
#define _TRANS_VRR_STATUS2_C 0x6243c
#define _TRANS_VRR_STATUS2_D 0x6343c
-#define TRANS_VRR_STATUS2(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A)
-#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+#define TRANS_VRR_STATUS2(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS2_A)
+#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
#define _TRANS_PUSH_A 0x60a70
#define _TRANS_PUSH_B 0x61a70
#define _TRANS_PUSH_C 0x62a70
#define _TRANS_PUSH_D 0x63a70
-#define TRANS_PUSH(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A)
-#define TRANS_PUSH_EN REG_BIT(31)
-#define TRANS_PUSH_SEND REG_BIT(30)
+#define TRANS_PUSH(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_PUSH_A)
+#define TRANS_PUSH_EN REG_BIT(31)
+#define TRANS_PUSH_SEND REG_BIT(30)
#define _TRANS_VRR_VSYNC_A 0x60078
-#define TRANS_VRR_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A)
-#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
-#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
-#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
-#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
-
-/*CMRR Registers*/
+#define TRANS_VRR_VSYNC(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VSYNC_A)
+#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
+#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
+#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
+#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
+
+/* Common register for HDMI EMP and DP AS SDP */
+#define _EMP_AS_SDP_TL_A 0x60204
+#define EMP_AS_SDP_TL(display, trans) _MMIO_TRANS2((display), (trans), _EMP_AS_SDP_TL_A)
+#define EMP_AS_SDP_DB_TL_MASK REG_GENMASK(12, 0)
+#define EMP_AS_SDP_DB_TL(db_transmit_line) REG_FIELD_PREP(EMP_AS_SDP_DB_TL_MASK, (db_transmit_line))
#define _TRANS_CMRR_M_LO_A 0x604F0
-#define TRANS_CMRR_M_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_LO_A)
+#define TRANS_CMRR_M_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_LO_A)
#define _TRANS_CMRR_M_HI_A 0x604F4
-#define TRANS_CMRR_M_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_HI_A)
+#define TRANS_CMRR_M_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_HI_A)
#define _TRANS_CMRR_N_LO_A 0x604F8
-#define TRANS_CMRR_N_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_LO_A)
+#define TRANS_CMRR_N_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_LO_A)
#define _TRANS_CMRR_N_HI_A 0x604FC
-#define TRANS_CMRR_N_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_HI_A)
-
-#define VRR_CTL_CMRR_ENABLE REG_BIT(27)
+#define TRANS_CMRR_N_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_HI_A)
#endif /* __INTEL_VRR_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index c855426544cf..d77798499c57 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -5,9 +5,9 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -695,15 +695,14 @@ static void glk_program_nearest_filter_coefs(struct intel_display *display,
GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter)
{
- if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
+ if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR)
return (PS_FILTER_PROGRAMMED |
- PS_Y_VERT_FILTER_SELECT(set) |
- PS_Y_HORZ_FILTER_SELECT(set) |
- PS_UV_VERT_FILTER_SELECT(set) |
- PS_UV_HORZ_FILTER_SELECT(set));
- }
+ PS_Y_VERT_FILTER_SELECT(0) |
+ PS_Y_HORZ_FILTER_SELECT(0) |
+ PS_UV_VERT_FILTER_SELECT(0) |
+ PS_UV_HORZ_FILTER_SELECT(0));
return PS_FILTER_MEDIUM;
}
@@ -761,7 +760,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
id = scaler_state->scaler_id;
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
- skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ skl_scaler_get_filter_select(crtc_state->hw.scaling_filter);
trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
@@ -827,7 +826,7 @@ skl_program_plane_scaler(struct intel_dsb *dsb,
}
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
- skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ skl_scaler_get_filter_select(plane_state->hw.scaling_filter);
trace_intel_plane_scaler_update_arm(plane, scaler_id,
crtc_x, crtc_y, crtc_w, crtc_h);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index c7b336359a5e..e20972ddfa09 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -8,24 +8,24 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_atomic_plane.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
-#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
@@ -2327,8 +2327,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
max_scale = skl_plane_max_scale(display, fb);
}
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
+ ret = intel_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
if (ret)
return ret;
@@ -2791,6 +2791,32 @@ static u8 tgl_plane_caps(struct intel_display *display,
return caps;
}
+static void skl_disable_tiling(struct intel_plane *plane)
+{
+ struct intel_plane_state *state = to_intel_plane_state(plane->base.state);
+ struct intel_display *display = to_intel_display(plane);
+ const struct drm_framebuffer *fb = state->hw.fb;
+ u32 plane_ctl;
+
+ plane_ctl = intel_de_read(display, PLANE_CTL(plane->pipe, plane->id));
+
+ if (intel_fb_uses_dpt(fb)) {
+ /* if DPT is enabled, keep tiling, but disable compression */
+ plane_ctl &= ~PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ } else {
+ /* if DPT is not in use, disable tiling and update the stride */
+ u32 stride = state->view.color_plane[0].scanout_stride / 64;
+
+ plane_ctl &= ~PLANE_CTL_TILED_MASK;
+ intel_de_write_fw(display, PLANE_STRIDE(plane->pipe, plane->id),
+ PLANE_STRIDE_(stride));
+ }
+ intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl);
+
+ intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id),
+ skl_plane_surf(state, 0));
+}
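PLANE_STRIDE expects linear strides in 64-byte units on these platforms, hence the scanout_stride / 64 when dropping back to a linear layout above.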
+
struct intel_plane *
skl_universal_plane_create(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
@@ -2837,6 +2863,7 @@ skl_universal_plane_create(struct intel_display *display,
plane->max_height = skl_plane_max_height;
plane->min_cdclk = skl_plane_min_cdclk;
}
+ plane->disable_tiling = skl_disable_tiling;
if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
@@ -3009,7 +3036,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
return;
}
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ intel_fb = intel_bo_alloc_framebuffer();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 8080f777910a..222c069fdadb 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,12 +6,14 @@
#include <linux/debugfs.h>
#include <drm/drm_blend.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "soc/intel_dram.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
@@ -19,21 +21,38 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
+#include "intel_flipq.h"
#include "intel_pcode.h"
+#include "intel_plane.h"
#include "intel_wm.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
-/*It is expected that DSB can do posted writes to every register in
- * the pipe and planes within 100us. For flip queue use case, the
- * recommended DSB execution time is 100us + one SAGV block time.
- */
-#define DSB_EXE_TIME 100
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ u8 mdclk_cdclk_ratio;
+ bool joined_mbus;
+};
+
+#define to_intel_dbuf_state(global_state) \
+ container_of_const((global_state), struct intel_dbuf_state, base)
+
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
static void skl_sagv_disable(struct intel_display *display);
@@ -84,8 +103,6 @@ intel_has_sagv(struct intel_display *display)
static u32
intel_sagv_block_time(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -96,9 +113,9 @@ intel_sagv_block_time(struct intel_display *display)
u32 val = 0;
int ret;
- ret = snb_pcode_read(&i915->uncore,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
+ ret = intel_pcode_read(display->drm,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
if (ret) {
drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
@@ -156,7 +173,6 @@ static void intel_sagv_init(struct intel_display *display)
*/
static void skl_sagv_enable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!intel_has_sagv(display))
@@ -166,8 +182,8 @@ static void skl_sagv_enable(struct intel_display *display)
return;
drm_dbg_kms(display->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
+ ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -189,7 +205,6 @@ static void skl_sagv_enable(struct intel_display *display)
static void skl_sagv_disable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!intel_has_sagv(display))
@@ -200,10 +215,9 @@ static void skl_sagv_disable(struct intel_display *display)
drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
- 1);
+ ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
@@ -229,7 +243,7 @@ static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(display, new_bw_state))
+ if (!intel_bw_can_enable_sagv(display, new_bw_state))
skl_sagv_disable(display);
}
@@ -242,74 +256,10 @@ static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(display, new_bw_state))
+ if (intel_bw_can_enable_sagv(display, new_bw_state))
skl_sagv_enable(display);
}
-static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask;
- new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Restrict required qgv points before updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(display, new_mask);
-}
-
-static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- new_mask = new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(display, new_mask);
-}
-
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
@@ -443,16 +393,6 @@ bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return skl_crtc_can_enable_sagv(crtc_state);
}
-bool intel_can_enable_sagv(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- if (DISPLAY_VER(display) < 11 &&
- bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
- return false;
-
- return bw_state->pipe_sagv_reject == 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -2233,7 +2173,7 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
}
return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
- 2 * cdclk_state->logical.cdclk));
+ 2 * intel_cdclk_logical(cdclk_state)));
}
static int
@@ -2677,6 +2617,97 @@ static char enast(bool enable)
return enable ? '*' : ' ';
}
+static noinline_for_stack void
+skl_print_plane_changes(struct intel_display *display,
+ struct intel_plane *plane,
+ const struct skl_plane_wm *old_wm,
+ const struct skl_plane_wm *new_wm)
+{
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+}
+
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
@@ -2706,7 +2737,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
-
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
@@ -2724,89 +2754,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
- enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
- enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
- enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
- enast(old_wm->trans_wm.enable),
- enast(old_wm->sagv.wm0.enable),
- enast(old_wm->sagv.trans_wm.enable),
- enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
- enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
- enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
- enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
- enast(new_wm->trans_wm.enable),
- enast(new_wm->sagv.wm0.enable),
- enast(new_wm->sagv.trans_wm.enable));
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
- enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
- enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
- enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
- enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].blocks, old_wm->wm[1].blocks,
- old_wm->wm[2].blocks, old_wm->wm[3].blocks,
- old_wm->wm[4].blocks, old_wm->wm[5].blocks,
- old_wm->wm[6].blocks, old_wm->wm[7].blocks,
- old_wm->trans_wm.blocks,
- old_wm->sagv.wm0.blocks,
- old_wm->sagv.trans_wm.blocks,
- new_wm->wm[0].blocks, new_wm->wm[1].blocks,
- new_wm->wm[2].blocks, new_wm->wm[3].blocks,
- new_wm->wm[4].blocks, new_wm->wm[5].blocks,
- new_wm->wm[6].blocks, new_wm->wm[7].blocks,
- new_wm->trans_wm.blocks,
- new_wm->sagv.wm0.blocks,
- new_wm->sagv.trans_wm.blocks);
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv.wm0.min_ddb_alloc,
- old_wm->sagv.trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv.wm0.min_ddb_alloc,
- new_wm->sagv.trans_wm.min_ddb_alloc);
+ skl_print_plane_changes(display, plane, old_wm, new_wm);
}
}
}
@@ -2910,67 +2858,79 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return 0;
}
-/*
- * If Fixed Refresh Rate or For VRR case Vmin = Vmax = Flipline:
- * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
- * watermark level1 and up and above. If watermark level 1 is
- * invalid program it with all 1's.
- * Program PKG_C_LATENCY Added Wake Time = DSB execution time
- * If Variable Refresh Rate where Vmin != Vmax != Flipline:
- * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
- * Program PKG_C_LATENCY Added Wake Time = 0
- */
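+/*
+ * Cache each pipe's VRR state and linetime (crtc_state->linetime is
+ * apparently in 1/8 us units, hence the DIV_ROUND_UP by 8 to get
+ * whole microseconds) from this commit, then take the max across
+ * *all* pipes so pipes untouched by the commit still count. Any pipe
+ * with VRR enabled makes this return 0, which programs the maximum
+ * latency (i.e. deep PkgC effectively disabled) below.
+ */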
+static int pkgc_max_linetime(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, max_linetime;
+
+ /*
+ * Apparently the hardware uses WM_LINETIME internally for
+ * this, so compute everything based on that.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ display->pkgc.disable[crtc->pipe] = crtc_state->vrr.enable;
+ display->pkgc.linetime[crtc->pipe] = DIV_ROUND_UP(crtc_state->linetime, 8);
+ }
+
+ max_linetime = 0;
+ for_each_intel_crtc(display->drm, crtc) {
+ if (display->pkgc.disable[crtc->pipe])
+ return 0;
+
+ max_linetime = max(display->pkgc.linetime[crtc->pipe], max_linetime);
+ }
+
+ return max_linetime;
+}
+
void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- u32 latency = LNL_PKG_C_LATENCY_MASK;
- u32 added_wake_time = 0;
- u32 max_linetime = 0;
- u32 clear, val;
- bool fixed_refresh_rate = false;
- int i;
+ int max_linetime, latency, added_wake_time = 0;
if (DISPLAY_VER(display) < 20)
return;
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->vrr.enable ||
- (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
- new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
- fixed_refresh_rate = true;
+ mutex_lock(&display->wm.wm_mutex);
- max_linetime = max(new_crtc_state->linetime, max_linetime);
- }
+ latency = skl_watermark_max_latency(display, 1);
- if (fixed_refresh_rate) {
- added_wake_time = DSB_EXE_TIME +
- display->sagv.block_time_us;
+ /* FIXME runtime changes to enable_flipq are racy */
+ if (display->params.enable_flipq)
+ added_wake_time = intel_flipq_exec_time_us(display);
- latency = skl_watermark_max_latency(display, 1);
+ /*
+ * Wa_22020432604
+ * "PKG_C_LATENCY Added Wake Time field is not working"
+ */
+ if (latency && IS_DISPLAY_VER(display, 20, 30)) {
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
- /* Wa_22020432604 */
- if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
- latency += added_wake_time;
- added_wake_time = 0;
- }
+ max_linetime = pkgc_max_linetime(state);
- /* Wa_22020299601 */
- if ((latency && max_linetime) &&
- (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
- latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
- } else if (!latency) {
- latency = LNL_PKG_C_LATENCY_MASK;
- }
+ if (max_linetime == 0 || latency == 0) {
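+ /*
+ * All 1's in the latency field means "no valid latency";
+ * REG_FIELD_GET(mask, mask) is simply the field's maximum
+ * value, keeping the hardware out of deep PkgC.
+ */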
+ latency = REG_FIELD_GET(LNL_PKG_C_LATENCY_MASK,
+ LNL_PKG_C_LATENCY_MASK);
+ added_wake_time = 0;
+ } else {
+ /*
+ * Wa_22020299601
+ * "Increase the latency programmed in PKG_C_LATENCY Pkg C Latency to be a
+ * multiple of the pipeline time from WM_LINETIME"
+ */
+ latency = roundup(latency, max_linetime);
}
- clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
- val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
- REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+ intel_de_write(display, LNL_PKG_C_LATENCY,
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time) |
+ REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency));
- intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
+ mutex_unlock(&display->wm.wm_mutex);
}
static int
@@ -3008,7 +2968,7 @@ skl_compute_wm(struct intel_atomic_state *state)
* drm_atomic_check_only() gets upset if we pull more crtcs
* into the state, so we have to calculate this based on the
* individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
+ * the overall intel_bw_can_enable_sagv(). Otherwise the
* crtcs not included in the commit would not switch to the
* SAGV watermarks when we are about to enable SAGV, and that
* would lead to underruns. This does mean extra power draw
@@ -3184,8 +3144,6 @@ void skl_watermark_ipc_update(struct intel_display *display)
static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
/* Display WA #0477 WaDisableIPC: skl */
if (display->platform.skylake)
return false;
@@ -3193,8 +3151,11 @@ static bool skl_watermark_ipc_can_enable(struct intel_display *display)
/* Display WA #1141: SKL:all KBL:all CFL */
if (display->platform.kabylake ||
display->platform.coffeelake ||
- display->platform.cometlake)
- return i915->dram_info.symmetric_memory;
+ display->platform.cometlake) {
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+
+ return dram_info->symmetric_memory;
+ }
return true;
}
@@ -3213,8 +3174,7 @@ static void
adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
int i, level;
/*
@@ -3250,7 +3210,7 @@ adjust_wm_latency(struct intel_display *display,
* any underrun. If we can't get DIMM info, assume a 16GB DIMM
* to avoid any underrun.
*/
- if (wm_lv_0_adjust_needed)
+ if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed)
wm[0] += 1;
}
@@ -3276,7 +3236,6 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int num_levels = display->wm.num_levels;
int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
int mult = display->platform.dg2 ? 2 : 1;
@@ -3285,7 +3244,7 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3298,7 +3257,7 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3690,6 +3649,38 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
+int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ return hweight8(dbuf_state->enabled_slices);
+}
+
+int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state)
+{
+ return hweight8(dbuf_state->active_pipes);
+}
+
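+/*
+ * pmdemand needs updating when the set of active pipes changes, and,
+ * prior to display version 30, also when the set of enabled DBUF
+ * slices changes.
+ */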
+bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+
+ if (new_dbuf_state &&
+ new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
+ return true;
+
+ if (DISPLAY_VER(display) < 30) {
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices !=
+ old_dbuf_state->enabled_slices)
+ return true;
+ }
+
+ return false;
+}
+
static void skl_mbus_sanitize(struct intel_display *display)
{
struct intel_dbuf_state *dbuf_state =
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 95b0b599d5c3..62790816f030 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -8,17 +8,15 @@
#include <linux/types.h>
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
-#include "intel_wm_types.h"
-
+enum plane_id;
struct intel_atomic_state;
-struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dbuf_state;
struct intel_display;
struct intel_plane;
struct intel_plane_state;
+struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -27,8 +25,6 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
-bool intel_can_enable_sagv(struct intel_display *display,
- const struct intel_bw_state *bw_state);
bool intel_has_sagv(struct intel_display *display);
u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
@@ -63,28 +59,11 @@ unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_st
struct intel_plane *plane, int width,
int height, int cpp);
-struct intel_dbuf_state {
- struct intel_global_state base;
-
- struct skl_ddb_entry ddb[I915_MAX_PIPES];
- unsigned int weight[I915_MAX_PIPES];
- u8 slices[I915_MAX_PIPES];
- u8 enabled_slices;
- u8 active_pipes;
- u8 mdclk_cdclk_ratio;
- bool joined_mbus;
-};
-
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-#define to_intel_dbuf_state(global_state) \
- container_of_const((global_state), struct intel_dbuf_state, base)
-
-#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
+int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state);
+int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state);
int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -98,5 +77,7 @@ void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
void intel_program_dpkgc_latency(struct intel_atomic_state *state);
+bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
+
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 2007bb9d974d..6d9f3312de7e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -30,15 +30,17 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
@@ -253,18 +255,16 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
static void band_gap_reset(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ vlv_flisdsi_get(display->drm);
- vlv_flisdsi_get(dev_priv);
-
- vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ vlv_flisdsi_write(display->drm, 0x08, 0x0001);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0005);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0025);
udelay(150);
- vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
- vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+ vlv_flisdsi_write(display->drm, 0x0F, 0x0000);
+ vlv_flisdsi_write(display->drm, 0x08, 0x0000);
- vlv_flisdsi_put(dev_priv);
+ vlv_flisdsi_put(display->drm);
}
static int intel_dsi_compute_config(struct intel_encoder *encoder,
@@ -457,17 +457,16 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- vlv_flisdsi_get(dev_priv);
+ vlv_flisdsi_get(display->drm);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms;
* needed every time after power gate */
- vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
- vlv_flisdsi_put(dev_priv);
+ vlv_flisdsi_write(display->drm, 0x04, 0x0004);
+ vlv_flisdsi_put(display->drm);
/* bandgap reset is needed every time after we power gate */
band_gap_reset(display);
@@ -1020,7 +1019,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
- u16 hactive, hfp, hsync, hbp, vfp, vsync;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
u16 hfp_sw, hsync_sw, hbp_sw;
u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
crtc_hblank_start_sw, crtc_hblank_end_sw;
@@ -1084,6 +1083,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* vertical values are in terms of lines */
vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port));
+ vbp = intel_de_read(display, MIPI_VBP_COUNT(display, port));
vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
@@ -1092,6 +1092,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
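+ /* cross-check the readout: vdisplay + vfp + vsync + vbp must add up to vtotal */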
+ drm_WARN_ON(display->drm, adjusted_mode->crtc_vdisplay +
+ vfp + vsync + vbp != adjusted_mode->crtc_vtotal);
adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 7ce924a5ef90..d42b61e6f076 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,7 +28,9 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -214,15 +216,14 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
+ vlv_cck_get(display->drm);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL,
config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
/* wait at least 0.5 us after ungating before enabling VCO,
@@ -230,16 +231,16 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
*/
usleep_range(10, 50);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
- if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
@@ -247,19 +248,18 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
+ vlv_cck_get(display->drm);
- tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+ vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, tmp);
- vlv_cck_put(dev_priv);
+ vlv_cck_put(display->drm);
}
bool bxt_dsi_pll_is_enabled(struct intel_display *display)
@@ -323,15 +323,14 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pll_ctl, pll_div;
drm_dbg_kms(display->drm, "\n");
- vlv_cck_get(dev_priv);
- pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
- vlv_cck_put(dev_priv);
+ vlv_cck_get(display->drm);
+ pll_ctl = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL);
+ pll_div = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_DIVIDER);
+ vlv_cck_put(display->drm);
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
@@ -592,12 +591,11 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void assert_dsi_pll(struct intel_display *display, bool state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
bool cur_state;
- vlv_cck_get(i915);
- cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
- vlv_cck_put(i915);
+ vlv_cck_get(display->drm);
+ cur_state = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
+ vlv_cck_put(display->drm);
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
diff --git a/drivers/gpu/drm/i915/display/vlv_sideband.c b/drivers/gpu/drm/i915/display/vlv_sideband.c
new file mode 100644
index 000000000000..e18045f2b89d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_sideband.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_dpio_phy.h"
+#include "vlv_sideband.h"
+
+static enum vlv_iosf_sb_unit vlv_dpio_phy_to_unit(struct intel_display *display,
+ enum dpio_phy phy)
+{
+ /*
+ * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (display->platform.cherryview)
+ return phy == DPIO_PHY0 ? VLV_IOSF_SB_DPIO_2 : VLV_IOSF_SB_DPIO;
+ else
+ return VLV_IOSF_SB_DPIO;
+}
+
+u32 vlv_dpio_read(struct drm_device *drm, enum dpio_phy phy, int reg)
+{
+ struct intel_display *display = to_intel_display(drm);
+ enum vlv_iosf_sb_unit unit = vlv_dpio_phy_to_unit(display, phy);
+ u32 val;
+
+ val = vlv_iosf_sb_read(drm, unit, reg);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ drm_WARN(display->drm, val == 0xffffffff,
+ "DPIO PHY%d read reg 0x%x == 0x%x\n",
+ phy, reg, val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_device *drm,
+ enum dpio_phy phy, int reg, u32 val)
+{
+ struct intel_display *display = to_intel_display(drm);
+ enum vlv_iosf_sb_unit unit = vlv_dpio_phy_to_unit(display, phy);
+
+ vlv_iosf_sb_write(drm, unit, reg, val);
+}
diff --git a/drivers/gpu/drm/i915/display/vlv_sideband.h b/drivers/gpu/drm/i915/display/vlv_sideband.h
new file mode 100644
index 000000000000..2c240d81fead
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_sideband.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef _VLV_SIDEBAND_H_
+#define _VLV_SIDEBAND_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#include "vlv_iosf_sb.h"
+#include "vlv_iosf_sb_reg.h"
+
+enum dpio_phy;
+struct drm_device;
+
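+/*
+ * Thin wrappers mapping the legacy per-unit sideband helpers (bunit,
+ * cck, ccu, dpio, flisdsi, nc, punit) onto the generic
+ * vlv_iosf_sb_*() API. Typical usage, as in the existing callers:
+ *
+ *	vlv_cck_get(drm);
+ *	val = vlv_cck_read(drm, CCK_REG_DSI_PLL_CONTROL);
+ *	vlv_cck_put(drm);
+ */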
+static inline void vlv_bunit_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline u32 vlv_bunit_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_BUNIT, reg);
+}
+
+static inline void vlv_bunit_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_BUNIT, reg, val);
+}
+
+static inline void vlv_bunit_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline void vlv_cck_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline u32 vlv_cck_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCK, reg);
+}
+
+static inline void vlv_cck_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_CCK, reg, val);
+}
+
+static inline void vlv_cck_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline void vlv_ccu_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline u32 vlv_ccu_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCU, reg);
+}
+
+static inline void vlv_ccu_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_CCU, reg, val);
+}
+
+static inline void vlv_ccu_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline void vlv_dpio_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_DPIO) | BIT(VLV_IOSF_SB_DPIO_2));
+}
+
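+/*
+ * Only i915 provides an out-of-line DPIO implementation; other users
+ * of the shared display code (presumably xe, which supports no
+ * VLV/CHV hardware) get no-op stubs.
+ */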
+#ifdef I915
+u32 vlv_dpio_read(struct drm_device *drm, enum dpio_phy phy, int reg);
+void vlv_dpio_write(struct drm_device *drm,
+ enum dpio_phy phy, int reg, u32 val);
+#else
+static inline u32 vlv_dpio_read(struct drm_device *drm, int phy, int reg)
+{
+ return 0;
+}
+static inline void vlv_dpio_write(struct drm_device *drm,
+ int phy, int reg, u32 val)
+{
+}
+#endif
+
+static inline void vlv_dpio_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_DPIO) | BIT(VLV_IOSF_SB_DPIO_2));
+}
+
+static inline void vlv_flisdsi_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline u32 vlv_flisdsi_read(struct drm_device *drm, u32 reg)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_FLISDSI, reg);
+}
+
+static inline void vlv_flisdsi_write(struct drm_device *drm, u32 reg, u32 val)
+{
+ vlv_iosf_sb_write(drm, VLV_IOSF_SB_FLISDSI, reg, val);
+}
+
+static inline void vlv_flisdsi_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline void vlv_nc_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline u32 vlv_nc_read(struct drm_device *drm, u8 addr)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_NC, addr);
+}
+
+static inline void vlv_nc_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline void vlv_punit_get(struct drm_device *drm)
+{
+ vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+static inline u32 vlv_punit_read(struct drm_device *drm, u32 addr)
+{
+ return vlv_iosf_sb_read(drm, VLV_IOSF_SB_PUNIT, addr);
+}
+
+static inline int vlv_punit_write(struct drm_device *drm, u32 addr, u32 val)
+{
+ return vlv_iosf_sb_write(drm, VLV_IOSF_SB_PUNIT, addr, val);
+}
+
+static inline void vlv_punit_put(struct drm_device *drm)
+{
+ vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 7a0cc51923b3..ef3b14ae2e0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -3,7 +3,6 @@
* Copyright © 2014-2016 Intel Corporation
*/
-#include "display/intel_display.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c34f41605b46..565f8fa330db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,7 +16,9 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
+struct drm_scanout_buffer;
enum intel_region_id;
+struct intel_framebuffer;
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
@@ -691,6 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index 9fbf14867a2a..b6dc3d1b9bb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -77,7 +77,7 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
* Set object's frontbuffer pointer. If frontbuffer is already set for the
* object, keep it and return its pointer to the caller. Please note that RCU
* mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display.fb_tracking.lock
+ * function is protected by i915->display->fb_tracking.lock
*
* Return: pointer to frontbuffer which was set.
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7f83f8bdc8fb..c16a57160b26 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -4,8 +4,11 @@
*/
#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
#include <linux/vmalloc.h>
+#include "display/intel_fb.h"
+#include "display/intel_display_types.h"
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
@@ -354,6 +357,145 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
return vaddr ?: ERR_PTR(-ENOMEM);
}
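+/*
+ * Per-framebuffer bookkeeping for the drm_panic path: the backing
+ * pages plus the currently kmapped page, so consecutive pixels on the
+ * same page can reuse a single mapping.
+ */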
+struct i915_panic_data {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+struct i915_framebuffer {
+ struct intel_framebuffer base;
+ struct i915_panic_data panic;
+};
+
+static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
+{
+ return &container_of_const(fb, struct i915_framebuffer, base)->panic;
+}
+
+static void i915_panic_kunmap(struct i915_panic_data *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+ struct page *page;
+ struct page **pages;
+ struct sgt_iter iter;
+
+ /* For a 3840x2160 32-bit framebuffer, this should require ~64K */
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
+ if (!pages)
+ return NULL;
+
+ i = 0;
+ for_each_sgt_page(page, iter, obj->mm.pages)
+ pages[i++] = page;
+ return pages;
+}
+
+static void i915_gem_object_panic_map_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ unsigned int offset = fb->panic_tiling(sb->width, x, y);
+
+ iosys_map_wr(&sb->map[0], offset, u32, color);
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel
+ * use kmap_local_page_try_from_panic() to map the page and write the pixel.
+ * Keep the mapping from the previous pixel when possible, to avoid
+ * excessive map/unmap churn.
+ */
+static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ unsigned int new_page;
+ unsigned int offset;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ i915_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr =
+ kmap_local_page_try_from_panic(panic->pages[panic->page]);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+{
+ struct i915_framebuffer *i915_fb;
+
+ i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
+ if (i915_fb)
+ return &i915_fb->base;
+ return NULL;
+}
+
+/*
+ * Set up the GEM framebuffer for drm_panic access.
+ * Use the current vaddr if it exists, otherwise set up a list of pages.
+ * pfn-backed objects are not supported yet.
+ */
+int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+{
+ enum i915_map_type has_type;
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+ void *ptr;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (ptr) {
+ if (i915_gem_object_has_iomem(obj))
+ iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
+ else
+ iosys_map_set_vaddr(&sb->map[0], ptr);
+
+ if (fb->panic_tiling)
+ sb->set_pixel = i915_gem_object_panic_map_set_pixel;
+ return 0;
+ }
+ if (i915_gem_object_has_struct_page(obj)) {
+ panic->pages = i915_gem_object_panic_pages(obj);
+ if (!panic->pages)
+ return -ENOMEM;
+ panic->page = -1;
+ sb->set_pixel = i915_gem_object_panic_page_set_pixel;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+{
+ struct i915_panic_data *panic = to_i915_panic_data(fb);
+
+ i915_panic_kunmap(panic);
+ panic->page = -1;
+ kfree(panic->pages);
+ panic->pages = NULL;
+}
+
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 7127e90c1a8f..991666fd9f85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -106,11 +106,6 @@ static void fence_set_priority(struct dma_fence *fence,
rcu_read_unlock();
}
-static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_chain_ops;
-}
-
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
@@ -126,7 +121,7 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
for (i = 0; i < array->num_fences; i++)
fence_set_priority(array->fences[i], attr);
- } else if (__dma_fence_is_chain(fence)) {
+ } else if (dma_fence_is_chain(fence)) {
struct dma_fence *iter;
/* The chain is ordered; if we boost the last, we boost all */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index bac15196b4d2..86d9d2fcb6a6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
#include "i915_selftest.h"
+#include "display/intel_display_core.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 325da0414d94..f6a98cf1e5a5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -79,6 +79,29 @@ struct lock_class_key;
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
+
+#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
+ unsigned int first__ = (first); \
+ unsigned int count__ = (count); \
+ ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
+})
+
+#define ENGINE_INSTANCES_MASK(gt, first, count) \
+ __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
+
+#define RCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
+#define BCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
+#define VDBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
+#define CCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
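+/*
+ * e.g. VDBOX_MASK(gt) is the mask of present video decode engines,
+ * shifted so that bit 0 corresponds to VCS0.
+ */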
+
#define GEN6_RING_FAULT_REG_READ(engine__) \
intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))
@@ -355,4 +378,12 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
+#define rb_to_uabi_engine(rb) \
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
+
+#define for_each_uabi_engine(engine__, i915__) \
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
+ (engine__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 0c723e7c71a2..889e61843ff3 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -328,7 +328,7 @@ static bool fence_is_active(const struct i915_fence_reg *fence)
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct intel_display *display = &ggtt->vm.i915->display;
+ struct intel_display *display = ggtt->vm.i915->display;
struct i915_fence_reg *active = NULL;
struct i915_fence_reg *fence, *fn;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 3182f19b9837..c7f59d60fac6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -6,6 +6,8 @@
#include <linux/string_helpers.h>
#include <linux/suspend.h>
+#include "display/intel_display_power.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_params.h"
@@ -70,7 +72,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
@@ -104,7 +106,7 @@ static int __gt_park(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index b635aa2820d9..87ef85483bae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -22,7 +22,7 @@
#include "intel_rps.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
@@ -366,9 +366,9 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_printf(p, "SW control enabled: %s\n",
str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
- vlv_punit_get(i915);
- freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ freq_sts = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index d1a382dfaa1d..93298820bee2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -250,11 +250,17 @@ void intel_gt_watchdog_work(struct work_struct *work)
llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
if (!i915_request_completed(rq)) {
struct dma_fence *f = &rq->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
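+ /* fence driver/timeline names are RCU-protected */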
+ rcu_read_lock();
+ driver = dma_fence_driver_name(f);
+ timeline = dma_fence_timeline_name(f);
pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
- f->ops->get_driver_name(f),
- f->ops->get_timeline_name(f),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
f->seqno);
+ rcu_read_unlock();
i915_request_cancel(rq, -EINTR);
}
i915_request_put(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index dbdcfe130ad4..4a1675dea1c7 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1205,7 +1205,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
- struct intel_display *display = &gt->i915->display;
+ struct intel_display *display = gt->i915->display;
intel_engine_mask_t awake;
int ret;
@@ -1423,7 +1423,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
bool need_display_reset;
bool reset_display;
@@ -1448,7 +1448,8 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
else
drm_dev_wedged_event(&gt->i915->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index eb89948cc112..0b35fdd461d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -23,7 +23,7 @@
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI 20u /* ms */
@@ -550,7 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -620,7 +620,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
@@ -820,9 +820,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- vlv_punit_get(i915);
- err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ err = vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
val, intel_gpu_freq(rps, val));
@@ -1280,7 +1280,7 @@ static int chv_rps_max_freq(struct intel_rps *rps)
struct intel_gt *gt = rps_to_gt(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);
switch (gt->info.sseu.eu_total) {
case 8:
@@ -1307,7 +1307,7 @@ static int chv_rps_rpe_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_GPU_DUTYCYCLE_REG);
val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
@@ -1318,7 +1318,7 @@ static int chv_rps_guar_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);
return val & FB_GFX_FREQ_FUSE_MASK;
}
@@ -1328,7 +1328,7 @@ static u32 chv_rps_min_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMIN_AT_VMIN_FUSE);
val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
return val & FB_GFX_FREQ_FUSE_MASK;
@@ -1362,14 +1362,14 @@ static bool chv_rps_enable(struct intel_rps *rps)
GEN6_PM_RP_DOWN_TIMEOUT);
/* Setting Fixed Bias */
- vlv_punit_get(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+ vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
@@ -1387,7 +1387,7 @@ static int vlv_rps_guar_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp1;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE);
rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
@@ -1400,7 +1400,7 @@ static int vlv_rps_max_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp0;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
@@ -1414,9 +1414,9 @@ static int vlv_rps_rpe_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rpe;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
return rpe;
@@ -1427,7 +1427,7 @@ static int vlv_rps_min_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
- val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
@@ -1463,15 +1463,15 @@ static bool vlv_rps_enable(struct intel_rps *rps)
/* WaGsvRC0ResidencyMethod:vlv */
rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
- vlv_punit_get(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+ vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
@@ -1684,7 +1684,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
rps->gpll_ref_freq =
- vlv_get_cck_clock(i915, "GPLL ref",
+ vlv_get_cck_clock(&i915->drm, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
@@ -1696,7 +1696,7 @@ static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- vlv_iosf_sb_get(i915,
+ vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1720,7 +1720,7 @@ static void vlv_rps_init(struct intel_rps *rps)
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
- vlv_iosf_sb_put(i915,
+ vlv_iosf_sb_put(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1730,7 +1730,7 @@ static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- vlv_iosf_sb_get(i915,
+ vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -1754,7 +1754,7 @@ static void chv_rps_init(struct intel_rps *rps)
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
- vlv_iosf_sb_put(i915,
+ vlv_iosf_sb_put(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
@@ -2119,9 +2119,9 @@ static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
} else if (GRAPHICS_VER(i915) >= 12) {
r = GEN12_RPSTAT1;
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_punit_get(i915);
- freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(i915);
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ freq = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
} else if (GRAPHICS_VER(i915) >= 6) {
r = GEN6_RPSTAT1;
} else {
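Throughout intel_rps.c the VLV/CHV sideband helpers change shape: the punit-
and NC-specific wrappers give way to a generic vlv_iosf_sb_*() API keyed on
the drm_device and an IOSF unit id, with get/put taking a bitmask so several
units can be held across one critical section. A condensed before/after
sketch of the conversion pattern, using only calls that appear in this diff:

	/* Before: unit-specific helpers keyed on the i915 pointer. */
	vlv_punit_get(i915);
	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_put(i915);

	/* After: one generic API; the unit is an explicit argument and
	 * get/put acquire a mask, e.g. punit and NC together. */
	vlv_iosf_sb_get(&i915->drm,
			BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC));
	val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT,
			       PUNIT_REG_GPU_FREQ_STS);
	vlv_iosf_sb_put(&i915->drm,
			BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC));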
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 9df80c325fc1..f360f020d8f1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -313,8 +313,13 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
*
* The same WA bit is used for both and 22011391025 is applicable to
* all DG2.
+ *
+ * Platforms post DG2 prevent this issue in hardware by stalling
+ * submissions. With this flag, GuC will schedule so as to avoid such
+ * stalls.
*/
- if (IS_DG2(gt->i915))
+ if (IS_DG2(gt->i915) ||
+ (CCS_MASK(gt) && GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)))
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */
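Per the extended comment, platforms after DG2 dodge the underlying hazard in
hardware by stalling submissions, and the dual-queue flag lets GuC schedule
around those stalls, so the workaround now also covers anything with a
compute engine on graphics IP 12.70 or newer. A condensed sketch of the
predicate this hunk builds:

	/* Sketch: when to request GUC_WA_DUAL_QUEUE. */
	static bool needs_dual_queue_wa(struct intel_gt *gt)
	{
		/* 22011391025 applies to all of DG2. */
		if (IS_DG2(gt->i915))
			return true;

		/* Post-DG2 parts with CCS: avoid the hardware stalls. */
		return CCS_MASK(gt) &&
		       GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70);
	}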
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index ec33ad942115..e848a04a80dc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1116,7 +1116,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100, NULL);
if (ret)
gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
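uc_fw_xfer() now passes a sixth argument, NULL, to
intel_wait_for_register_fw(); by analogy with i915's other register-wait
helpers this is most likely an optional out-pointer for the last-read
register value, though the hunk itself only shows NULL. A sketch under that
assumption:

	u32 dma_ctl;	/* hypothetical: capture the final DMA_CTRL value */

	/* Caller doesn't need the value: pass NULL, as this hunk does. */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0,
					 100, NULL);

	/* Caller wants it, e.g. for richer error reporting (assumed). */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0,
					 100, &dma_ctl);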
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index f25ee2953baf..a91e23c22ea1 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -38,6 +38,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
@@ -50,6 +51,7 @@
#include "trace.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
@@ -1286,7 +1288,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1333,7 +1335,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1421,7 +1423,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 1e1af5e545a4..74197e337585 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,6 +36,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
@@ -43,6 +44,7 @@
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
+#include "display/intel_display_core.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_sprite_regs.h"
@@ -69,7 +71,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
@@ -82,7 +84,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
@@ -183,7 +185,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -634,7 +636,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -664,7 +666,7 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
int pipe;
mutex_lock(&vgpu->vgpu_lock);
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8090bc53c7e1..bc7f05f9a271 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -63,19 +63,6 @@ struct intel_vgpu;
#define AUX_BURST_SIZE 20
-#define SBI_RESPONSE_MASK 0x3
-#define SBI_RESPONSE_SHIFT 0x1
-#define SBI_STAT_MASK 0x1
-#define SBI_STAT_SHIFT 0x0
-#define SBI_OPCODE_SHIFT 8
-#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
-#define SBI_CMD_IORD 2
-#define SBI_CMD_IOWR 3
-#define SBI_CMD_CRRD 6
-#define SBI_CMD_CRWR 7
-#define SBI_ADDR_OFFSET_SHIFT 16
-#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
-
struct intel_vgpu_sbi_register {
unsigned int offset;
u32 value;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 89147d33168c..2031b97de2b7 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -35,6 +35,7 @@
#include <drm/display/drm_dp.h>
#include "display/intel_dp_aux_regs.h"
+#include "display/intel_gmbus.h"
#include "display/intel_gmbus_regs.h"
#include "gvt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index f9f7ef131371..a8079cfa8e1d 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -39,9 +39,11 @@
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_sprite_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -154,7 +156,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -211,7 +213,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 val, fmt;
int pipe;
@@ -342,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
int pipe;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1344e6d20a34..f446f73f0fe2 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,6 +40,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
@@ -47,6 +48,7 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -55,6 +57,7 @@
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -658,7 +661,7 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
enum port port;
u32 dp_br, link_m, link_n, htotal, vtotal;
@@ -1022,7 +1025,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
u32 pipe = DSPSURF_TO_PIPE(display, offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -1064,7 +1067,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -1412,12 +1415,12 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
- SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
- unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
- SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
- vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
- sbi_offset);
+ if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD) {
+ unsigned int sbi_offset;
+
+ sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));
+
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset);
}
read_vreg(vgpu, offset, p_data, bytes);
return 0;
@@ -1431,21 +1434,20 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
- data |= SBI_READY;
+ data &= ~SBI_STATUS_MASK;
+ data |= SBI_STATUS_READY;
- data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data &= ~SBI_RESPONSE_MASK;
data |= SBI_RESPONSE_SUCCESS;
vgpu_vreg(vgpu, offset) = data;
- if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
- SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
- unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
- SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRWR) {
+ unsigned int sbi_offset;
+
+ sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));
- write_virtual_sbi_register(vgpu, sbi_offset,
- vgpu_vreg_t(vgpu, SBI_DATA));
+ write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA));
}
return 0;
}
@@ -2200,7 +2202,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
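The SBI emulation drops gvt's private mask/shift constants for the shared
definitions now in display/intel_sbi_regs.h, and REG_FIELD_GET() performs the
mask-and-shift in one step. A minimal sketch of the idiom, with the mask
value derived from the defines removed above (address offset in bits 31:16):

	#define SBI_ADDR_MASK	REG_GENMASK(31, 16)	/* per this series */

	u32 ctl_stat = vgpu_vreg_t(vgpu, SBI_CTL_STAT);
	unsigned int sbi_offset;

	/* Compare the masked opcode field directly instead of shifting
	 * it down to a raw command number. */
	if ((ctl_stat & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD)
		sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK,
					   vgpu_vreg_t(vgpu, SBI_ADDR));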
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 336d079c4207..a956da68e6bd 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index e16e0d4c9534..da1135fa7cda 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -36,6 +36,7 @@
#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 273bc43468a0..c6263c6d3384 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -51,6 +51,7 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_driver.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
@@ -58,9 +59,11 @@
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
+#include "display/intel_opregion.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
+#include "display/intel_sbi.h"
#include "display/intel_sprite_uapi.h"
#include "display/skl_watermark.h"
@@ -107,8 +110,7 @@
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
-#include "intel_sbi.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
#include "vlv_suspend.h"
static const struct drm_driver i915_drm_driver;
@@ -133,10 +135,6 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->wq == NULL)
goto out_err;
- dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->display.hotplug.dp_wq == NULL)
- goto out_free_wq;
-
/*
* The unordered i915 workqueue should be used for all work
* scheduling that do not require running in order, which used
@@ -145,12 +143,10 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
*/
dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
if (dev_priv->unordered_wq == NULL)
- goto out_free_dp_wq;
+ goto out_free_wq;
return 0;
-out_free_dp_wq:
- destroy_workqueue(dev_priv->display.hotplug.dp_wq);
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
@@ -162,7 +158,6 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->unordered_wq);
- destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -222,7 +217,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
*/
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int ret = 0;
if (i915_inject_probe_failure(dev_priv))
@@ -236,7 +231,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->gpu_error.lock);
- intel_sbi_init(dev_priv);
+ intel_sbi_init(display);
vlv_iosf_sb_init(dev_priv);
mutex_init(&dev_priv->sb_lock);
@@ -285,7 +280,7 @@ err_workqueues:
*/
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(display);
@@ -297,9 +292,11 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
mutex_destroy(&dev_priv->sb_lock);
vlv_iosf_sb_fini(dev_priv);
- intel_sbi_fini(dev_priv);
+ intel_sbi_fini(display);
i915_params_free(&dev_priv->params);
+
+ intel_display_device_remove(display);
}
/**
@@ -313,7 +310,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -460,7 +457,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
@@ -571,7 +568,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
- intel_dram_detect(dev_priv);
+ ret = intel_dram_detect(dev_priv);
+ if (ret)
+ goto err_opregion;
intel_bw_init_hw(display);
@@ -599,7 +598,7 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
@@ -619,7 +618,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
*/
static int i915_driver_register(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
unsigned int i;
int ret;
@@ -670,7 +669,7 @@ static int i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -742,6 +741,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct drm_i915_private *i915;
+ struct intel_display *display;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
struct drm_i915_private, drm);
@@ -756,7 +756,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ return ERR_CAST(display);
+
+ i915->display = display;
return i915;
}
@@ -790,7 +794,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
}
- display = &i915->display;
+ display = i915->display;
ret = i915_driver_early_probe(i915);
if (ret < 0)
@@ -882,7 +886,7 @@ out_pci_disable:
void i915_driver_remove(struct drm_i915_private *i915)
{
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -915,7 +919,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t wakeref;
@@ -938,8 +941,6 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
-
- intel_display_device_remove(display);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -969,7 +970,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
disable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_disable(&i915->runtime_pm);
@@ -991,10 +992,10 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&i915->display);
- intel_encoder_shutdown_all(&i915->display);
+ intel_encoder_suspend_all(display);
+ intel_encoder_shutdown_all(display);
- intel_dmc_suspend(&i915->display);
+ intel_dmc_suspend(display);
i915_gem_suspend(i915);
@@ -1049,7 +1050,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
@@ -1074,7 +1075,7 @@ static int i915_drm_suspend(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&dev_priv->display);
+ intel_encoder_suspend_all(display);
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(display);
@@ -1099,7 +1100,7 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
@@ -1171,7 +1172,7 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -1256,7 +1257,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1488,7 +1489,7 @@ static int i915_pm_restore(struct device *kdev)
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1587,7 +1588,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
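With struct drm_i915_private now holding an intel_display pointer rather than
an embedded struct, creation moves into i915_driver_create() and teardown
into i915_driver_late_release(), which is why every "&i915->display" in this
file becomes "i915->display". A condensed sketch of the new ownership flow,
stitched from the hunks above:

	/* Create: probe the display device and attach it to i915. */
	display = intel_display_device_probe(pdev);
	if (IS_ERR(display))
		return ERR_CAST(display);
	i915->display = display;

	/* Destroy: last step of late release, after intel_sbi_fini(). */
	intel_display_device_remove(display);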
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d0e1980dcba2..4e4e89746aa6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,13 +32,11 @@
#include <uapi/drm/i915_drm.h>
+#include <linux/pci.h>
#include <linux/pm_qos.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_display_limits.h"
-#include "display/intel_display_core.h"
-
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
@@ -62,11 +60,11 @@
#include "intel_step.h"
#include "intel_uncore.h"
+struct dram_info;
struct drm_i915_clock_gating_funcs;
-struct vlv_s0ix_state;
+struct intel_display;
struct intel_pxp;
-
-#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+struct vlv_s0ix_state;
/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
struct i915_dsm {
@@ -177,7 +175,7 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
- struct intel_display display;
+ struct intel_display *display;
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
@@ -224,12 +222,10 @@ struct drm_i915_private {
bool irqs_enabled;
- /* LPT/WPT IOSF sideband protection */
- struct mutex sbi_lock;
-
/* VLV/CHV IOSF sideband */
struct {
struct mutex lock; /* protect sideband access */
+ unsigned long locked_unit_mask;
struct pm_qos_request qos;
} vlv_iosf_sb;
@@ -285,25 +281,7 @@ struct drm_i915_private {
u32 suspend_count;
struct vlv_s0ix_state *vlv_s0ix_state;
- struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
- enum intel_dram_type {
- INTEL_DRAM_UNKNOWN,
- INTEL_DRAM_DDR3,
- INTEL_DRAM_DDR4,
- INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4,
- INTEL_DRAM_DDR5,
- INTEL_DRAM_LPDDR5,
- INTEL_DRAM_GDDR,
- INTEL_DRAM_GDDR_ECC,
- __INTEL_DRAM_TYPE_MAX,
- } type;
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- } dram_info;
+ const struct dram_info *dram_info;
struct intel_runtime_pm runtime_pm;
@@ -374,14 +352,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
-#define rb_to_uabi_engine(rb) \
- rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
-
-#define for_each_uabi_engine(engine__, i915__) \
- for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
- (engine__); \
- (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)
@@ -590,29 +560,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))
-#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
-#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
-
-#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
- unsigned int first__ = (first); \
- unsigned int count__ = (count); \
- ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
-})
-
-#define ENGINE_INSTANCES_MASK(gt, first, count) \
- __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
-
-#define RCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
-#define BCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
-#define VDBOX_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
-#define CCS_MASK(gt) \
- ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
-
#define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode)
/*
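struct drm_i915_private likewise stops embedding its DRAM description: the
struct definition moves out of i915_drv.h behind a forward declaration and
only a const pointer remains, with i915_driver_hw_probe() now failing the
probe when detection errors out (see the intel_dram_detect() hunk earlier). A
short sketch of both sides, assuming the detection code publishes the
structure it fills:

	/* Probe: a detection failure now aborts driver load. */
	ret = intel_dram_detect(dev_priv);
	if (ret)
		goto err_opregion;

	/* Consumers: read through the immutable pointer. */
	const struct dram_info *dram = dev_priv->dram_info;

	drm_dbg(&dev_priv->drm, "DRAM: %u channel(s)\n",
		dram->num_channels);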
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 82e9d289398c..20b3cb29cfff 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -134,4 +134,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index be8149e46281..6fcda6d7b5b7 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -16,7 +16,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 568525d49428..0e4b832dff84 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -2067,7 +2067,7 @@ static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 95042879bec4..191ed8bb1d9c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,6 +33,7 @@
#include <drm/drm_drv.h>
+#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
@@ -230,7 +231,7 @@ out:
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -324,7 +325,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -418,7 +419,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -507,7 +508,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -558,7 +559,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -616,7 +617,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
- struct intel_display *display = &i915->display;
+ struct intel_display *display = i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -660,7 +661,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen2_irq_reset(uncore, DE_IRQ_REGS);
@@ -681,7 +682,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
@@ -693,7 +694,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
@@ -705,7 +706,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
@@ -720,7 +721,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -740,7 +741,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -755,7 +756,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen5_gt_irq_postinstall(to_gt(dev_priv));
@@ -764,7 +765,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen5_gt_irq_postinstall(to_gt(dev_priv));
@@ -776,7 +777,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(display);
@@ -786,7 +787,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
@@ -802,7 +803,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -821,7 +822,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
gen8_gt_irq_postinstall(to_gt(dev_priv));
@@ -894,7 +895,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_display_irq_reset(display);
@@ -906,7 +907,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -941,7 +942,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -996,7 +997,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_display_irq_reset(display);
@@ -1027,7 +1028,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1059,7 +1060,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1152,71 +1153,62 @@ void intel_irq_fini(struct drm_i915_private *i915)
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- return cherryview_irq_handler;
- else if (IS_VALLEYVIEW(dev_priv))
- return valleyview_irq_handler;
- else if (GRAPHICS_VER(dev_priv) == 4)
- return i965_irq_handler;
- else
- return i915_irq_handler;
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- return dg1_irq_handler;
- else if (GRAPHICS_VER(dev_priv) >= 11)
- return gen11_irq_handler;
- else if (GRAPHICS_VER(dev_priv) >= 8)
- return gen8_irq_handler;
- else
- return ilk_irq_handler;
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ return dg1_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ return gen11_irq_handler;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return cherryview_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ return gen8_irq_handler;
+ else if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ return ilk_irq_handler;
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ return i965_irq_handler;
+ else
+ return i915_irq_handler;
}
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_irq_reset(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 4)
- i965_irq_reset(dev_priv);
- else
- i915_irq_reset(dev_priv);
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- dg1_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 11)
- gen11_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 8)
- gen8_irq_reset(dev_priv);
- else
- ilk_irq_reset(dev_priv);
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ dg1_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ gen11_irq_reset(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ gen8_irq_reset(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ ilk_irq_reset(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ i965_irq_reset(dev_priv);
+ else
+ i915_irq_reset(dev_priv);
}
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_irq_postinstall(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 4)
- i965_irq_postinstall(dev_priv);
- else
- i915_irq_postinstall(dev_priv);
- } else {
- if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
- dg1_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 11)
- gen11_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) >= 8)
- gen8_irq_postinstall(dev_priv);
- else
- ilk_irq_postinstall(dev_priv);
- }
+ if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+ dg1_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 11)
+ gen11_irq_postinstall(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 8)
+ gen8_irq_postinstall(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) >= 5)
+ ilk_irq_postinstall(dev_priv);
+ else if (GRAPHICS_VER(dev_priv) == 4)
+ i965_irq_postinstall(dev_priv);
+ else
+ i915_irq_postinstall(dev_priv);
}
/**
@@ -1265,7 +1257,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display = dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
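Dropping the HAS_GMCH() split leaves each of these three dispatchers as a
single ordered chain, and correctness hinges on ordering: Cherryview reports
graphics version 8 and Valleyview version 7, so both platform checks must
come before the version thresholds that would otherwise swallow them. A
reduced sketch of the resulting order:

	/* Most specific first: CHV before ver >= 8, VLV before the
	 * ver >= 5 ilk fallback, gen4 and older at the end. */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 10))
		return dg1_irq_handler;
	else if (GRAPHICS_VER(i915) >= 11)
		return gen11_irq_handler;
	else if (IS_CHERRYVIEW(i915))
		return cherryview_irq_handler;
	else if (GRAPHICS_VER(i915) >= 8)
		return gen8_irq_handler;
	/* ... VLV, ilk, i965, then the i915 fallback, as above. */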
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 21006c7f615c..b2e311f4791a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -663,7 +663,6 @@ static const struct intel_device_info dg1_info = {
DGFX_FEATURES,
.__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
- .require_force_probe = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2e4190da3e0d..03b895897f60 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -144,10 +144,6 @@
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
-#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
-#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
-#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
-
/*
* Reset registers
*/
@@ -187,46 +183,6 @@
/* DPIO registers */
#define DPIO_DEVFN 0
-#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1 << 1)
-#define DPIO_CMNRST (1 << 0)
-
-#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
-#define MIPIO_RST_CTRL (1 << 2)
-
-#define _BXT_PHY_CTL_DDI_A 0x64C00
-#define _BXT_PHY_CTL_DDI_B 0x64C10
-#define _BXT_PHY_CTL_DDI_C 0x64C20
-#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
-#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
-#define BXT_PHY_LANE_ENABLED (1 << 8)
-#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
- _BXT_PHY_CTL_DDI_B)
-
-#define _PHY_CTL_FAMILY_DDI 0x64C90
-#define _PHY_CTL_FAMILY_EDP 0x64C80
-#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
-#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) \
- _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
- _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
-
-/* UAIMI scratch pad register 1 */
-#define UAIMI_SPR1 _MMIO(0x4F074)
-/* SKL VccIO mask */
-#define SKL_VCCIO_MASK 0x1
-/* SKL balance leg register */
-#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
-/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
-#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
-/* Balance leg disable bits */
-#define BALANCE_LEG_DISABLE_SHIFT 23
-#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
-
/*
* Fence registers
* [0-7] @ 0x2000 gen2,gen3
@@ -372,16 +328,6 @@
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
-#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
-#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
-#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
-#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
-#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
-#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
-#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
-#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
-#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
-
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
#define ERR_INT_INVALID_GTT_PTE (1 << 29)
@@ -413,25 +359,6 @@
#define CLAIM_ER_OVERFLOW REG_BIT(16)
#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0)
-#define DERRMR _MMIO(0x44050)
-/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1 << 0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
-#define DERRMR_PIPEA_VBLANK (1 << 3)
-#define DERRMR_PIPEA_HBLANK (1 << 5)
-#define DERRMR_PIPEB_SCANLINE (1 << 8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
-#define DERRMR_PIPEB_VBLANK (1 << 11)
-#define DERRMR_PIPEB_HBLANK (1 << 13)
-/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1 << 14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
-#define DERRMR_PIPEC_VBLANK (1 << 21)
-#define DERRMR_PIPEC_HBLANK (1 << 22)
-
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
@@ -458,11 +385,6 @@
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
- VLV_IER, \
- VLV_IIR)
-
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
@@ -475,16 +397,6 @@
#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
-#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
-#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
-#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
-#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
-#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
-#define VLV_ERROR_PAGE_TABLE (1 << 4)
-#define VLV_ERROR_CLAIM (1 << 0)
-
-#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
-
#define INSTPM _MMIO(0x20c0)
#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
@@ -509,23 +421,6 @@
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
-#define _MBUS_ABOX0_CTL 0x45038
-#define _MBUS_ABOX1_CTL 0x45048
-#define _MBUS_ABOX2_CTL 0x4504C
-#define MBUS_ABOX_CTL(x) \
- _MMIO(_PICK_EVEN_2RANGES(x, 2, \
- _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
- _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
-
-#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
-#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
-#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
-#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
-#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
-#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
-#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
-#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
-
/*
* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default.
@@ -700,173 +595,6 @@
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
-#define IPS_CTL _MMIO(0x43408)
-#define IPS_ENABLE REG_BIT(31)
-#define IPS_FALSE_COLOR REG_BIT(4)
-
-/*
- * Clock control & power management
- */
-#define _DPLL_A 0x6014
-#define _DPLL_B 0x6018
-#define _CHV_DPLL_C 0x6030
-#define DPLL(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
- (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
-
-#define VGA0 _MMIO(0x6000)
-#define VGA1 _MMIO(0x6004)
-#define VGA_PD _MMIO(0x6010)
-#define VGA0_PD_P2_DIV_4 (1 << 7)
-#define VGA0_PD_P1_DIV_2 (1 << 5)
-#define VGA0_PD_P1_SHIFT 0
-#define VGA0_PD_P1_MASK (0x1f << 0)
-#define VGA1_PD_P2_DIV_4 (1 << 15)
-#define VGA1_PD_P1_DIV_2 (1 << 13)
-#define VGA1_PD_P1_SHIFT 8
-#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_SDVO_HIGH_SPEED (1 << 30)
-#define DPLL_DVO_2X_MODE (1 << 30)
-#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
-#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
-#define DPLL_VGA_MODE_DIS (1 << 28)
-#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
-#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
-#define DPLL_MODE_MASK (3 << 26)
-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
-#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
-#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
-#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1 << 15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
-#define DPLL_SSC_REF_CLK_CHV (1 << 13)
-#define DPLL_PORTC_READY_MASK (0xf << 4)
-#define DPLL_PORTB_READY_MASK (0xf)
-
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
-
-/* Additional CHV pll/phy registers */
-#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
-#define DPLL_PORTD_READY_MASK (0xf)
-#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
-#define PHY_LDO_DELAY_0NS 0x0
-#define PHY_LDO_DELAY_200NS 0x1
-#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
-#define PHY_CH_SU_PSR 0x1
-#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
-#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
-#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
-
-/*
- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
- * this field (only one bit may be set).
- */
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
-#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
-/* i830, required in DVO non-gang */
-#define PLL_P2_DIVIDE_BY_4 (1 << 23)
-#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
-#define PLL_REF_INPUT_DREFCLK (0 << 13)
-#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
-#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
-#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
-#define PLL_REF_INPUT_MASK (3 << 13)
-#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* Ironlake */
-# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
-# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
-# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
-# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
-
-/*
- * Parallel to Serial Load Pulse phase selection.
- * Selects the phase for the 10X DPLL clock for the PCIe
- * digital display port. The range is 4 to 13; 10 or more
- * is just a flip delay. The default is 6
- */
-#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
-/*
- * SDVO multiplier for 945G/GM. Not used on 965.
- */
-#define SDVO_MULTIPLIER_MASK 0x000000ff
-#define SDVO_MULTIPLIER_SHIFT_HIRES 4
-#define SDVO_MULTIPLIER_SHIFT_VGA 0
-
-#define _DPLL_A_MD 0x601c
-#define _DPLL_B_MD 0x6020
-#define _CHV_DPLL_C_MD 0x603c
-#define DPLL_MD(dev_priv, pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
- (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
-
-/*
- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
- *
- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
- */
-#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
-#define DPLL_MD_UDI_DIVIDER_SHIFT 24
-/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
-#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
-/*
- * SDVO/UDI pixel multiplier.
- *
- * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
- * clock rate is 10 times the DPLL clock. In low resolution/refresh rate
- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
- * dummy bytes in the datastream at an increased clock rate, with both sides of
- * the link knowing how many bytes are fill.
- *
- * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
- * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
- * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
- * through an SDVO command.
- *
- * This register field has values of multiplication factor minus 1, with
- * a maximum multiplier of 5 for SDVO.
- */
-#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
-#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
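-
-/*
- * A minimal sketch (editor's illustration, not part of this header) of
- * picking and programming the multiplier; "dotclock_khz" is hypothetical
- * and the 1 GHz bus rate floor comes from the comment above:
- *
- *	u32 mult;
- *
- *	for (mult = 1; mult <= 5; mult++)
- *		if (dotclock_khz * 10 * mult >= 1000000)
- *			break;
- *	dpll_md &= ~DPLL_MD_UDI_MULTIPLIER_MASK;
- *	dpll_md |= (mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- */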
-/*
- * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
- * This is best set to the default value (3) or the CRT won't work. No,
- * I don't entirely understand what this does...
- */
-#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
-#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-
-#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
-
-#define _FPA0 0x6040
-#define _FPA1 0x6044
-#define _FPB0 0x6048
-#define _FPB1 0x604c
-#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
-#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
-#define FP_N_DIV_MASK 0x003f0000
-#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
-#define FP_N_DIV_SHIFT 16
-#define FP_M1_DIV_MASK 0x00003f00
-#define FP_M1_DIV_SHIFT 8
-#define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
-#define FP_M2_DIV_SHIFT 0
-
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
@@ -1000,27 +728,6 @@
#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */
#define DEUC _MMIO(0x6214) /* CRL only */
-#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1 << 15)
-
-#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
-
-#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
-#define CDCLK_FREQ_SHIFT 4
-#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
-#define CZCLK_FREQ_MASK 0xf
-
-#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
-#define PFI_CREDIT_63 (9 << 28) /* chv only */
-#define PFI_CREDIT_31 (8 << 28) /* chv only */
-#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
-#define PFI_CREDIT_RESEND (1 << 27)
-#define VGA_FAST_MODE_DISABLE (1 << 14)
-
-#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
-
-#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
-
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
@@ -1051,25 +758,11 @@
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
/*
- * Overlay regs
- */
-#define OVADD _MMIO(0x30000)
-#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3 << 20)
-#define OGAMC5 _MMIO(0x30010)
-#define OGAMC4 _MMIO(0x30014)
-#define OGAMC3 _MMIO(0x30018)
-#define OGAMC2 _MMIO(0x3001c)
-#define OGAMC1 _MMIO(0x30020)
-#define OGAMC0 _MMIO(0x30024)
-
-/*
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define DARBF_GATING_DIS REG_BIT(27)
-#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
-#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
+#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
#define PWM2_GATING_DIS REG_BIT(14)
#define PWM1_GATING_DIS REG_BIT(13)
@@ -1077,577 +770,6 @@
#define TGL_VRH_GATING_DIS REG_BIT(31)
#define DPT_GATING_DIS REG_BIT(22)
-#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
-#define BXT_GMBUS_GATING_DIS (1 << 14)
-#define DG2_DPFC_GATING_DIS REG_BIT(31)
-
-#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
-#define DPCE_GATING_DIS REG_BIT(17)
-
-#define _CLKGATE_DIS_PSL_A 0x46520
-#define _CLKGATE_DIS_PSL_B 0x46524
-#define _CLKGATE_DIS_PSL_C 0x46528
-#define DUPS1_GATING_DIS (1 << 15)
-#define DUPS2_GATING_DIS (1 << 19)
-#define DUPS3_GATING_DIS (1 << 23)
-#define CURSOR_GATING_DIS REG_BIT(28)
-#define DPF_GATING_DIS (1 << 10)
-#define DPF_RAM_GATING_DIS (1 << 9)
-#define DPFR_GATING_DIS (1 << 8)
-
-#define CLKGATE_DIS_PSL(pipe) \
- _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
-
-#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
-#define _CLKGATE_DIS_PSL_EXT_B 0x46550
-#define PIPEDMC_GATING_DIS REG_BIT(12)
-
-#define CLKGATE_DIS_PSL_EXT(pipe) \
- _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
-
-/*
- * Display engine regs
- */
-/* Pipe/transcoder A timing regs */
-#define _TRANS_HTOTAL_A 0x60000
-#define _TRANS_HTOTAL_B 0x61000
-#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
-#define HTOTAL_MASK REG_GENMASK(31, 16)
-#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
-#define HACTIVE_MASK REG_GENMASK(15, 0)
-#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
-
-#define _TRANS_HBLANK_A 0x60004
-#define _TRANS_HBLANK_B 0x61004
-#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
-#define HBLANK_END_MASK REG_GENMASK(31, 16)
-#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
-#define HBLANK_START_MASK REG_GENMASK(15, 0)
-#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
-
-#define _TRANS_HSYNC_A 0x60008
-#define _TRANS_HSYNC_B 0x61008
-#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
-#define HSYNC_END_MASK REG_GENMASK(31, 16)
-#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
-#define HSYNC_START_MASK REG_GENMASK(15, 0)
-#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
-
-#define _TRANS_VTOTAL_A 0x6000c
-#define _TRANS_VTOTAL_B 0x6100c
-#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
-#define VTOTAL_MASK REG_GENMASK(31, 16)
-#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
-#define VACTIVE_MASK REG_GENMASK(15, 0)
-#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
-
-#define _TRANS_VBLANK_A 0x60010
-#define _TRANS_VBLANK_B 0x61010
-#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
-#define VBLANK_END_MASK REG_GENMASK(31, 16)
-#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
-#define VBLANK_START_MASK REG_GENMASK(15, 0)
-#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
-
-#define _TRANS_VSYNC_A 0x60014
-#define _TRANS_VSYNC_B 0x61014
-#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
-#define VSYNC_END_MASK REG_GENMASK(31, 16)
-#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
-#define VSYNC_START_MASK REG_GENMASK(15, 0)
-#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
-
-#define _PIPEASRC 0x6001c
-#define _PIPEBSRC 0x6101c
-#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
-#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
-#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
-#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
-#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-
-#define _BCLRPAT_A 0x60020
-#define _BCLRPAT_B 0x61020
-#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
-
-#define _TRANS_VSYNCSHIFT_A 0x60028
-#define _TRANS_VSYNCSHIFT_B 0x61028
-#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
-
-#define _TRANS_MULT_A 0x6002c
-#define _TRANS_MULT_B 0x6102c
-#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
-
-/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
-#define PORTB_HOTPLUG_INT_EN (1 << 29)
-#define PORTC_HOTPLUG_INT_EN (1 << 28)
-#define PORTD_HOTPLUG_INT_EN (1 << 27)
-#define SDVOB_HOTPLUG_INT_EN (1 << 26)
-#define SDVOC_HOTPLUG_INT_EN (1 << 25)
-#define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_HOTPLUG_INT_EN (1 << 9)
-#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
- PORTC_HOTPLUG_INT_EN | \
- PORTD_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
-#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
-/* must use period 64 on GM45 according to docs */
-#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
-#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
-#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
-#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
-#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
-#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
-#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
-#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-
-#define PORT_HOTPLUG_STAT(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
-/* HDMI/DP bits are g4x+ */
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
-#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
-#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
-#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
-#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
-#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
-#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
-#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
-#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
-#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
-/* CRT/TV common between gen3+ */
-#define CRT_HOTPLUG_INT_STATUS (1 << 11)
-#define TV_HOTPLUG_INT_STATUS (1 << 10)
-#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
-#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
-#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
-#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
-#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
-#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
-#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
-
-/* SDVO is different across gen3/4 */
-#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
-#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
-/*
- * Bspec seems to be seriously misleading about the SDVO hpd bits on i965g/gm,
- * since reality corroborates that they're the same as on gen3. But keep these
- * bits here (and the comment!) to help any other lost wanderers back onto the
- * right track.
- */
-#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
-#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
-#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
-#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_G4X | \
- SDVOC_HOTPLUG_INT_STATUS_G4X | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
-#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_I915 | \
- SDVOC_HOTPLUG_INT_STATUS_I915 | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
-/* SDVO and HDMI port control.
- * The same register may be used for SDVO or HDMI */
-#define _GEN3_SDVOB 0x61140
-#define _GEN3_SDVOC 0x61160
-#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
-#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
-#define GEN4_HDMIB GEN3_SDVOB
-#define GEN4_HDMIC GEN3_SDVOC
-#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
-#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
-#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
-#define PCH_SDVOB _MMIO(0xe1140)
-#define PCH_HDMIB PCH_SDVOB
-#define PCH_HDMIC _MMIO(0xe1150)
-#define PCH_HDMID _MMIO(0xe1160)
-
-#define PORT_DFT_I9XX _MMIO(0x61150)
-#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
-#define DC_BALANCE_RESET_VLV (1 << 31)
-#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
-#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
-#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
-#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
-
-/* Gen 3 SDVO bits: */
-#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL_SHIFT 30
-#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
-#define SDVO_STALL_SELECT (1 << 29)
-#define SDVO_INTERRUPT_ENABLE (1 << 26)
-/*
- * 915G/GM SDVO pixel multiplier.
- * Programmed value is multiplier - 1, up to 5x.
- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
- */
-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
-#define SDVO_PORT_MULTIPLY_SHIFT 23
-#define SDVO_PHASE_SELECT_MASK (15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
-#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
-#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
-#define SDVO_DETECTED (1 << 2)
-/* Bits to be preserved when writing */
-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
- SDVO_INTERRUPT_ENABLE)
-#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
-
-/* Gen 4 SDVO/HDMI bits: */
-#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
-#define SDVO_COLOR_FORMAT_MASK (7 << 26)
-#define SDVO_ENCODING_SDVO (0 << 10)
-#define SDVO_ENCODING_HDMI (2 << 10)
-#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
-#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
-#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
-#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
-/* VSYNC/HSYNC bits new with 965, default is to be set */
-#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
-#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
-
-/* Gen 5 (IBX) SDVO/HDMI bits: */
-#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
-#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
-
-/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_SHIFT_CPT 29
-#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-
-/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_SHIFT_CHV 24
-#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-
-/* Video Data Island Packet control */
-#define VIDEO_DIP_DATA _MMIO(0x61178)
-/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
- * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
- * of the infoframe structure specified by CEA-861. */
-#define VIDEO_DIP_DATA_SIZE 32
-#define VIDEO_DIP_ASYNC_DATA_SIZE 36
-#define VIDEO_DIP_GMP_DATA_SIZE 36
-#define VIDEO_DIP_VSC_DATA_SIZE 36
-#define VIDEO_DIP_PPS_DATA_SIZE 132
-#define VIDEO_DIP_CTL _MMIO(0x61170)
-/* Pre HSW: */
-#define VIDEO_DIP_ENABLE (1 << 31)
-#define VIDEO_DIP_PORT(port) ((port) << 29)
-#define VIDEO_DIP_PORT_MASK (3 << 29)
-#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
-#define VIDEO_DIP_ENABLE_AVI (1 << 21)
-#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
-#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
-#define VIDEO_DIP_ENABLE_SPD (8 << 21)
-#define VIDEO_DIP_SELECT_AVI (0 << 19)
-#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
-#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
-#define VIDEO_DIP_SELECT_SPD (3 << 19)
-#define VIDEO_DIP_SELECT_MASK (3 << 19)
-#define VIDEO_DIP_FREQ_ONCE (0 << 16)
-#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
-#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
-#define VIDEO_DIP_FREQ_MASK (3 << 16)
-/* HSW and later: */
-#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
-#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 25)
-#define VSC_SELECT_SHIFT 25
-#define VSC_DIP_HW_HEA_DATA (0 << 25)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
-#define VSC_DIP_SW_HEA_DATA (3 << 25)
-#define VDIP_ENABLE_PPS (1 << 24)
-#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
-#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
-#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
-#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
-#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
-#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
-/* ADL and later: */
-#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
-
-#define PCH_GTC_CTL _MMIO(0xe7000)
-#define PCH_GTC_ENABLE (1 << 31)
-
-/* Display Port */
-#define DP_A _MMIO(0x64000) /* eDP */
-#define DP_B _MMIO(0x64100)
-#define DP_C _MMIO(0x64200)
-#define DP_D _MMIO(0x64300)
-#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
-#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
-#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-#define DP_PORT_EN REG_BIT(31)
-#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
-#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
-#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
-#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
-#define DP_PIPE_SEL_SHIFT_CHV 16
-#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
-#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
-#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
-#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
-#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
-#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
-#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
-#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
-#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
-#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
-#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
-#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
-#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
-#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
-#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
-#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
-#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
-#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
-#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
-#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
-#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
-#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
-#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
-#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
-#define DP_ENHANCED_FRAMING REG_BIT(18)
-#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
-#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
-#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
-#define DP_PORT_REVERSAL REG_BIT(15)
-#define EDP_PLL_ENABLE REG_BIT(14)
-#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
-#define DP_SCRAMBLING_DISABLE REG_BIT(12)
-#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
-#define DP_COLOR_RANGE_16_235 REG_BIT(8)
-#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
-#define DP_SYNC_VS_HIGH REG_BIT(4)
-#define DP_SYNC_HS_HIGH REG_BIT(3)
-#define DP_DETECTED REG_BIT(2)
-
-/*
- * Computing GMCH M and N values for the Display Port link
- *
- * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
- *
- * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
- *
- * The GMCH value is used internally.
- *
- * bytes_per_pixel is the number of bytes coming out of the plane,
- * which is after the LUTs, so we want the bytes for our color format.
- * For our current usage, this is always 3, one byte for R, G and B.
- */
-#define _PIPEA_DATA_M_G4X 0x70050
-#define _PIPEB_DATA_M_G4X 0x71050
-#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
-/* Transfer unit size for display port, minus 1; default is 0x3f (for TU size 64) */
-#define TU_SIZE_MASK REG_GENMASK(30, 25)
-#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
-#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
-#define DATA_LINK_N_MAX (0x800000)
-
-#define _PIPEA_DATA_N_G4X 0x70054
-#define _PIPEB_DATA_N_G4X 0x71054
-#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
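-
-/*
- * A minimal sketch (editor's illustration, not part of this header) of
- * forming the data M/N pair from the formula above; names are
- * hypothetical, clocks in kHz, and a real driver would also reduce the
- * pair to fit the 24-bit DATA_LINK_M_N_MASK fields:
- *
- *	data_m = dotclock_khz * bytes_per_pixel;
- *	data_n = ls_clk_khz * nlanes;
- */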
-
-/*
- * Computing Link M and N values for the Display Port link
- *
- * Link M / N = pixel_clock / ls_clk
- *
- * (the DP spec calls pixel_clock the 'strm_clk')
- *
- * The Link value is transmitted in the Main Stream
- * Attributes and VB-ID.
- */
-#define _PIPEA_LINK_M_G4X 0x70060
-#define _PIPEB_LINK_M_G4X 0x71060
-#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
-
-#define _PIPEA_LINK_N_G4X 0x70064
-#define _PIPEB_LINK_N_G4X 0x71064
-#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
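-
-/*
- * A minimal sketch (editor's illustration, not part of this header) of
- * the link M/N pair from the formula above; names are hypothetical and
- * the pair would likewise be reduced to fit the 24-bit fields:
- *
- *	link_m = pixel_clock_khz;
- *	link_n = ls_clk_khz;
- */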
-
-/* Pipe A */
-#define _PIPEADSL 0x70000
-#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
-#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
-#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
-
-#define _TRANSACONF 0x70008
-#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
-#define TRANSCONF_ENABLE REG_BIT(31)
-#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
-#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
-#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
-#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
-#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
-#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
-#define TRANSCONF_FORCE_BORDER REG_BIT(25)
-#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
-#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
-#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0)
-#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1)
-#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
-#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
-#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
-#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
-#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
-#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
-#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
-#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
-#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
-/*
- * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
- * DBL=power saving pixel doubling, PF-ID* requires panel fitter
- */
-#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
-#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
-#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
-#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
-#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
-#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
-#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
-#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
-#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
-#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define TRANSCONF_WGC_ENABLE REG_BIT(15) /* vlv/chv only */
-#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
-#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
-#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
-#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
-#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
-#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
-#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
-#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
-#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
-#define TRANSCONF_DITHER_EN REG_BIT(4)
-#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
-#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
-#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
-#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
-#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
-#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
-
-#define _PIPEASTAT 0x70024
-#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
-#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
-#define PIPE_CRC_DONE_ENABLE (1UL << 28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
-#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
-#define PIPE_DPST_EVENT_STATUS (1UL << 7)
-#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
-#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
-#define PIPE_HBLANK_INT_STATUS (1UL << 0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
-#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
-#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-
-#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
-#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
-#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
-
-#define _PIPE_MISC_A 0x70030
-#define _PIPE_MISC_B 0x71030
-#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
-#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
-#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
-#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
-#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
-#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
-#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
-#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
-#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
-#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
-#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
-/*
- * For display versions < 13, bits 5-7 of PIPE_MISC represent DITHER BPC,
- * with valid values of 6, 8 and 10 BPC.
- * On ADLP+, bits 5-7 represent PORT OUTPUT BPC, with valid values of
- * 6, 8, 10 and 12 BPC.
- */
-#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
-#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
-#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
-#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
-#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
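-
-/*
- * A minimal sketch (editor's illustration, not part of this header) of
- * mapping a bpc value onto this field; "bpc" and "val" are hypothetical,
- * and the 12 BPC encoding is adlp+ only:
- *
- *	switch (bpc) {
- *	case 6:  val |= PIPE_MISC_BPC_6; break;
- *	case 8:  val |= PIPE_MISC_BPC_8; break;
- *	case 10: val |= PIPE_MISC_BPC_10; break;
- *	case 12: val |= PIPE_MISC_BPC_12_ADLP; break;
- *	}
- */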
-#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
-#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
-#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
-#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
-#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
-
-#define _PIPE_MISC2_A 0x7002C
-#define _PIPE_MISC2_B 0x7102C
-#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
-#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
-#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
-#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
-#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
-#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
-
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
#define PIPEB_HLINE_INT_EN REG_BIT(28)
@@ -1669,129 +791,6 @@
#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9)
#define PLANEC_FLIPDONE_INT_EN REG_BIT(8)
-#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
-#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
-#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
-#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
-#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
-#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
-#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
-#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
-#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
-#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
-#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
-#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
-#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
-#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
-#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
-#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
-#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
-#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
-#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
-#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
-#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
-#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
-#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
-#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
-#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
-#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
-#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
-#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
-
-#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1 << 31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
-
-#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
-
-/*
- * The two pipe frame counter registers are not synchronized, so
- * reading a stable value is somewhat tricky. The following code
- * should work:
- *
- * do {
- *	high1 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
- *		PIPE_FRAME_HIGH_SHIFT;
- *	low1 = (INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
- *	       PIPE_FRAME_LOW_SHIFT;
- *	high2 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
- *		PIPE_FRAME_HIGH_SHIFT;
- * } while (high1 != high2);
- * frame = (high1 << 8) | low1;
- */
-#define _PIPEAFRAMEHIGH 0x70040
-#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
-#define PIPE_FRAME_HIGH_MASK 0x0000ffff
-#define PIPE_FRAME_HIGH_SHIFT 0
-
-#define _PIPEAFRAMEPIXEL 0x70044
-#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
-#define PIPE_FRAME_LOW_MASK 0xff000000
-#define PIPE_FRAME_LOW_SHIFT 24
-#define PIPE_PIXEL_MASK 0x00ffffff
-#define PIPE_PIXEL_SHIFT 0
-
-/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_G4X 0x70040
-#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
-
-#define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
-
-/* CHV pipe B blender */
-#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
-#define CHV_BLEND_MASK REG_GENMASK(31, 30)
-#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
-#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
-#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
-
-#define _CHV_CANVAS_A 0x60a04
-#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
-#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
-#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
-#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
-
-/* Display/Sprite base address macros */
-#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
-
-/*
- * VBIOS flags
- * gen2:
- * [00:06] alm,mgm
- * [10:16] all
- * [30:32] alm,mgm
- * gen3+:
- * [00:0f] all
- * [10:1f] all
- * [30:32] all
- */
-#define SWF0(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
-#define SWF1(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
-#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
-#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-
-#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
-#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
-#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
-#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
-#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
-#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
-#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
-
-/* refresh rate hardware control */
-#define RR_HW_CTL _MMIO(0x45300)
-#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
-#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
-
#define PCH_3DCGDIS0 _MMIO(0x46020)
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -1799,211 +798,6 @@
#define PCH_3DCGDIS1 _MMIO(0x46024)
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
-#define _PIPEA_DATA_M1 0x60030
-#define _PIPEB_DATA_M1 0x61030
-#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
-
-#define _PIPEA_DATA_N1 0x60034
-#define _PIPEB_DATA_N1 0x61034
-#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
-
-#define _PIPEA_DATA_M2 0x60038
-#define _PIPEB_DATA_M2 0x61038
-#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
-
-#define _PIPEA_DATA_N2 0x6003c
-#define _PIPEB_DATA_N2 0x6103c
-#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
-
-#define _PIPEA_LINK_M1 0x60040
-#define _PIPEB_LINK_M1 0x61040
-#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
-
-#define _PIPEA_LINK_N1 0x60044
-#define _PIPEB_LINK_N1 0x61044
-#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
-
-#define _PIPEA_LINK_M2 0x60048
-#define _PIPEB_LINK_M2 0x61048
-#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
-
-#define _PIPEA_LINK_N2 0x6004c
-#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
-
-/*
- * Skylake scalers
- */
-#define _ID(id, a, b) _PICK_EVEN(id, a, b)
-#define _PS_1A_CTRL 0x68180
-#define _PS_2A_CTRL 0x68280
-#define _PS_1B_CTRL 0x68980
-#define _PS_2B_CTRL 0x68A80
-#define _PS_1C_CTRL 0x69180
-#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
- _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
-#define PS_SCALER_EN REG_BIT(31)
-#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
-#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
-#define PS_SCALER_TYPE_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 1)
-#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
-#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
-#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
-#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
-#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
-#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
-#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
-#define PS_ADAPTIVE_FILTERING_EN REG_BIT(28) /* icl+ */
-#define PS_BINDING_MASK REG_GENMASK(27, 25)
-#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
-#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
-#define PS_FILTER_MASK REG_GENMASK(24, 23)
-#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
-#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
-#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
-#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
-#define PS_ADAPTIVE_FILTER_MASK REG_BIT(22) /* icl+ */
-#define PS_ADAPTIVE_FILTER_MEDIUM REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 0)
-#define PS_ADAPTIVE_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_ADAPTIVE_FILTER_MASK, 1)
-#define PS_PIPE_SCALER_LOC_MASK REG_BIT(21) /* icl+ */
-#define PS_PIPE_SCALER_LOC_AFTER_OUTPUT_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 0) /* non-linear */
-#define PS_PIPE_SCALER_LOC_AFTER_CSC REG_FIELD_PREP(PS_PIPE_SCALER_LOC_MASK, 1) /* linear */
-#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
-#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
-#define PS_PROG_SCALE_FACTOR REG_BIT(19) /* tgl+ */
-#define PS_PWRUP_PROGRESS REG_BIT(17)
-#define PS_V_FILTER_BYPASS REG_BIT(8)
-#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
-#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
-#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
-#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
-#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
-#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
-#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
-#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
-#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
-#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
-#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
-#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
-#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
-#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
-#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
-
-#define _PS_PWR_GATE_1A 0x68160
-#define _PS_PWR_GATE_2A 0x68260
-#define _PS_PWR_GATE_1B 0x68960
-#define _PS_PWR_GATE_2B 0x68A60
-#define _PS_PWR_GATE_1C 0x69160
-#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
- _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
-#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
-#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
-#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
-#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
-#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
-#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
-#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
-#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
-#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
-#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
-#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
-
-#define _PS_WIN_POS_1A 0x68170
-#define _PS_WIN_POS_2A 0x68270
-#define _PS_WIN_POS_1B 0x68970
-#define _PS_WIN_POS_2B 0x68A70
-#define _PS_WIN_POS_1C 0x69170
-#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
- _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
-#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
-#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
-#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
-#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
-
-#define _PS_WIN_SZ_1A 0x68174
-#define _PS_WIN_SZ_2A 0x68274
-#define _PS_WIN_SZ_1B 0x68974
-#define _PS_WIN_SZ_2B 0x68A74
-#define _PS_WIN_SZ_1C 0x69174
-#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
- _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
-#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
-#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
-#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
-#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
-
-#define _PS_VSCALE_1A 0x68184
-#define _PS_VSCALE_2A 0x68284
-#define _PS_VSCALE_1B 0x68984
-#define _PS_VSCALE_2B 0x68A84
-#define _PS_VSCALE_1C 0x69184
-#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
- _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
-
-#define _PS_HSCALE_1A 0x68190
-#define _PS_HSCALE_2A 0x68290
-#define _PS_HSCALE_1B 0x68990
-#define _PS_HSCALE_2B 0x68A90
-#define _PS_HSCALE_1C 0x69190
-#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
- _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
-
-#define _PS_VPHASE_1A 0x68188
-#define _PS_VPHASE_2A 0x68288
-#define _PS_VPHASE_1B 0x68988
-#define _PS_VPHASE_2B 0x68A88
-#define _PS_VPHASE_1C 0x69188
-#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
- _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
-#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
-#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
-#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
-#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
-#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
-#define PS_PHASE_TRIP (1 << 0)
-
-#define _PS_HPHASE_1A 0x68194
-#define _PS_HPHASE_2A 0x68294
-#define _PS_HPHASE_1B 0x68994
-#define _PS_HPHASE_2B 0x68A94
-#define _PS_HPHASE_1C 0x69194
-#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
- _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
-
-#define _PS_ECC_STAT_1A 0x681D0
-#define _PS_ECC_STAT_2A 0x682D0
-#define _PS_ECC_STAT_1B 0x689D0
-#define _PS_ECC_STAT_2B 0x68AD0
-#define _PS_ECC_STAT_1C 0x691D0
-#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
- _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
-
-#define _PS_COEF_SET0_INDEX_1A 0x68198
-#define _PS_COEF_SET0_INDEX_2A 0x68298
-#define _PS_COEF_SET0_INDEX_1B 0x68998
-#define _PS_COEF_SET0_INDEX_2B 0x68A98
-#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
-#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
-
-#define _PS_COEF_SET0_DATA_1A 0x6819C
-#define _PS_COEF_SET0_DATA_2A 0x6829C
-#define _PS_COEF_SET0_DATA_1B 0x6899C
-#define _PS_COEF_SET0_DATA_2B 0x68A9C
-#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
-
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
@@ -2042,25 +836,6 @@
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
-/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1 << 30)
-#define DE_GSE_IVB (1 << 29)
-#define DE_PCH_EVENT_IVB (1 << 28)
-#define DE_DP_A_HOTPLUG_IVB (1 << 27)
-#define DE_AUX_CHANNEL_A_IVB (1 << 26)
-#define DE_EDP_PSR_INT_HSW (1 << 19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
-#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
-#define DE_PIPEC_VBLANK_IVB (1 << 10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
-#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
-#define DE_PIPEB_VBLANK_IVB (1 << 5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
-#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
-#define DE_PIPEA_VBLANK_IVB (1 << 0)
-#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
-
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1 << 31)
@@ -2100,8 +875,6 @@
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
-#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
-
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
@@ -2118,106 +891,6 @@
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
-#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
-#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
-#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
-#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
-#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
-#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
-#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */
-#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */
-#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
-#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
-#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
-#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
-#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
-#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
-#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
-#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
-#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
-#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
-#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
-#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
-#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
-#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
-#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
-#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
-#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
-#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
-#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
-#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
-#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
-#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
-#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
-#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
-#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
-#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
-#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
- REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
-#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
-#define GEN8_PIPE_VSYNC REG_BIT(1)
-#define GEN8_PIPE_VBLANK REG_BIT(0)
-
-#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
- GEN8_DE_PIPE_IER(pipe), \
- GEN8_DE_PIPE_IIR(pipe))
-
-#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
-#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
-
-#define GEN8_DE_PORT_ISR _MMIO(0x44440)
-#define GEN8_DE_PORT_IMR _MMIO(0x44444)
-#define GEN8_DE_PORT_IIR _MMIO(0x44448)
-#define GEN8_DE_PORT_IER _MMIO(0x4444c)
-#define DSI1_NON_TE (1 << 31)
-#define DSI0_NON_TE (1 << 30)
-#define ICL_AUX_CHANNEL_E (1 << 29)
-#define ICL_AUX_CHANNEL_F (1 << 28)
-#define GEN9_AUX_CHANNEL_D (1 << 27)
-#define GEN9_AUX_CHANNEL_C (1 << 26)
-#define GEN9_AUX_CHANNEL_B (1 << 25)
-#define DSI1_TE (1 << 24)
-#define DSI0_TE (1 << 23)
-#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
-#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
-#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
-#define BXT_DE_PORT_GMBUS (1 << 1)
-#define GEN8_AUX_CHANNEL_A (1 << 0)
-#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
-#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
-#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
-#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
-#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
-#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
-#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
-#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
-#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
-#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
-#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
-
-#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
- GEN8_DE_PORT_IER, \
- GEN8_DE_PORT_IIR)
-
-#define GEN8_DE_MISC_ISR _MMIO(0x44460)
-#define GEN8_DE_MISC_IMR _MMIO(0x44464)
-#define GEN8_DE_MISC_IIR _MMIO(0x44468)
-#define GEN8_DE_MISC_IER _MMIO(0x4446c)
-#define XELPDP_RM_TIMEOUT REG_BIT(29)
-#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
-#define GEN8_DE_MISC_GSE REG_BIT(27)
-#define GEN8_DE_EDP_PSR REG_BIT(19)
-#define XELPDP_PMDEMAND_RSP REG_BIT(3)
-#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
-
-#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
- GEN8_DE_MISC_IER, \
- GEN8_DE_MISC_IIR)
-
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
#define GEN8_PCU_IIR _MMIO(0x444e8)
@@ -2250,110 +923,12 @@
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
-#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
-#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
-#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
-#define GEN11_DE_PCH_IRQ (1 << 23)
-#define GEN11_DE_MISC_IRQ (1 << 22)
-#define GEN11_DE_HPD_IRQ (1 << 21)
-#define GEN11_DE_PORT_IRQ (1 << 20)
-#define GEN11_DE_PIPE_C (1 << 18)
-#define GEN11_DE_PIPE_B (1 << 17)
-#define GEN11_DE_PIPE_A (1 << 16)
-
-#define GEN11_DE_HPD_ISR _MMIO(0x44470)
-#define GEN11_DE_HPD_IMR _MMIO(0x44474)
-#define GEN11_DE_HPD_IIR _MMIO(0x44478)
-#define GEN11_DE_HPD_IER _MMIO(0x4447c)
-#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
- GEN11_TC_HOTPLUG(HPD_PORT_TC1))
-#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
- GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
-
-#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
- GEN11_DE_HPD_IER, \
- GEN11_DE_HPD_IIR)
-
-#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
-#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
-#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
-
-#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
-#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
-#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
-#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
-#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
-#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
-#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
-#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
-#define XE2LPD_AUX_DDI(hpd_pin) REG_BIT(6 + _HPD_PIN_DDI(hpd_pin))
-#define XE2LPD_AUX_DDI_MASK REG_GENMASK(7, 6)
-#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
-#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
-
-#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
- PICAINTERRUPT_IER, \
- PICAINTERRUPT_IIR)
-
-#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
-#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
-#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
-#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
-#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
-#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
-#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
-
-#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
-#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
-#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
-#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
-#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
-#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
-#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
-#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
-
-#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
-#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
-#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
-#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
-#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
-
-#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
-#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
-
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT REG_BIT(25)
#define ILK_DPARB_GATE REG_BIT(22)
#define ILK_VSDPFD_FULL REG_BIT(21)
-#define FUSE_STRAP _MMIO(0x42014)
-#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
-#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
-#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
-#define IVB_PIPE_C_DISABLE REG_BIT(28)
-#define ILK_HDCP_DISABLE REG_BIT(25)
-#define ILK_eDP_A_DISABLE REG_BIT(24)
-#define HSW_CDCLK_LIMIT REG_BIT(24)
-#define ILK_DESKTOP REG_BIT(23)
-#define HSW_CPU_SSC_ENABLE REG_BIT(21)
-
-#define FUSE_STRAP3 _MMIO(0x42020)
-#define HSW_REF_CLK_SELECT REG_BIT(1)
-
#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE REG_BIT(28)
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE REG_BIT(9)
@@ -2378,25 +953,6 @@
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14)
-#define CHICKEN_MISC_2 _MMIO(0x42084)
-#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
-#define BMG_DARB_HALF_BLK_END_BURST REG_BIT(27)
-#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
-#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
-#define GLK_CL2_PWR_DOWN REG_BIT(12)
-#define GLK_CL1_PWR_DOWN REG_BIT(11)
-#define GLK_CL0_PWR_DOWN REG_BIT(10)
-
-#define CHICKEN_MISC_3 _MMIO(0x42088)
-#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
-#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
-#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
-
-#define CHICKEN_MISC_4 _MMIO(0x4208c)
-#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
-#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
-#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
-
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
@@ -2420,72 +976,11 @@
#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
-#define _CHICKEN_TRANS_A 0x420c0
-#define _CHICKEN_TRANS_B 0x420c4
-#define _CHICKEN_TRANS_C 0x420c8
-#define _CHICKEN_TRANS_EDP 0x420cc
-#define _CHICKEN_TRANS_D 0x420d8
-#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
- [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
- [TRANSCODER_A] = _CHICKEN_TRANS_A, \
- [TRANSCODER_B] = _CHICKEN_TRANS_B, \
- [TRANSCODER_C] = _CHICKEN_TRANS_C, \
- [TRANSCODER_D] = _CHICKEN_TRANS_D))
-#define _MTL_CHICKEN_TRANS_A 0x604e0
-#define _MTL_CHICKEN_TRANS_B 0x614e0
-#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
- _MTL_CHICKEN_TRANS_A, \
- _MTL_CHICKEN_TRANS_B)
-#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
-#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
-#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
-#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
-#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
-#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
-#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
-#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
-#define DP_FEC_BS_JITTER_WA REG_BIT(15)
-#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
-#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
-#define HDCP_LINE_REKEY_DISABLE REG_BIT(0)
-
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
#define DISP_FBC_WM_DIS REG_BIT(15)
-#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
-#define DISP_IPC_ENABLE REG_BIT(3)
-
-#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
-#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
-
-#define _BW_BUDDY0_CTL 0x45130
-#define _BW_BUDDY1_CTL 0x45140
-#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
- _BW_BUDDY0_CTL, \
- _BW_BUDDY1_CTL))
-#define BW_BUDDY_DISABLE REG_BIT(31)
-#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
-#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
-
-#define _BW_BUDDY0_PAGE_MASK 0x45134
-#define _BW_BUDDY1_PAGE_MASK 0x45144
-#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
- _BW_BUDDY0_PAGE_MASK, \
- _BW_BUDDY1_PAGE_MASK))
-
-#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
-#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
-
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
@@ -2502,518 +997,24 @@
#define MASK_WAKEMEM REG_BIT(13)
#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
-#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
-#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
-#define DCPR_MASK_LPMODE REG_BIT(26)
-#define DCPR_SEND_RESP_IMM REG_BIT(25)
-#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
-
-#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
-#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
-
-#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
-#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define ICL_DFSM_DMC_DISABLE (1 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
-#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
-#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
-
-#define XE2LPD_DE_CAP _MMIO(0x41100)
-#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
-#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
-#define XE2LPD_DE_CAP_DSC_REMOVED 1
-#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26)
-#define XE2LPD_DE_CAP_SCALER_SINGLE 1
-
-#define SKL_DSSM _MMIO(0x51004)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
-#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
-
#define GMD_ID_DISPLAY _MMIO(0x510a0)
#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
#define GMD_ID_STEP REG_GENMASK(5, 0)
-/* GEN11 chicken */
-#define _PIPEA_CHICKEN 0x70038
-#define _PIPEB_CHICKEN 0x71038
-#define _PIPEC_CHICKEN 0x72038
-#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
- _PIPEB_CHICKEN)
-#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
-#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
-#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
-#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
-#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
-
/* PCH */
-#define PCH_DISPLAY_BASE 0xc0000u
-
-/* south display engine interrupt: IBX */
-#define SDE_AUDIO_POWER_D (1 << 27)
-#define SDE_AUDIO_POWER_C (1 << 26)
-#define SDE_AUDIO_POWER_B (1 << 25)
-#define SDE_AUDIO_POWER_SHIFT (25)
-#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
-#define SDE_GMBUS (1 << 24)
-#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
-#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
-#define SDE_AUDIO_HDCP_MASK (3 << 22)
-#define SDE_AUDIO_TRANSB (1 << 21)
-#define SDE_AUDIO_TRANSA (1 << 20)
-#define SDE_AUDIO_TRANS_MASK (3 << 20)
-#define SDE_POISON (1 << 19)
-/* 18 reserved */
-#define SDE_FDI_RXB (1 << 17)
-#define SDE_FDI_RXA (1 << 16)
-#define SDE_FDI_MASK (3 << 16)
-#define SDE_AUXD (1 << 15)
-#define SDE_AUXC (1 << 14)
-#define SDE_AUXB (1 << 13)
-#define SDE_AUX_MASK (7 << 13)
-/* 12 reserved */
-#define SDE_CRT_HOTPLUG (1 << 11)
-#define SDE_PORTD_HOTPLUG (1 << 10)
-#define SDE_PORTC_HOTPLUG (1 << 9)
-#define SDE_PORTB_HOTPLUG (1 << 8)
-#define SDE_SDVOB_HOTPLUG (1 << 6)
-#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
- SDE_SDVOB_HOTPLUG | \
- SDE_PORTB_HOTPLUG | \
- SDE_PORTC_HOTPLUG | \
- SDE_PORTD_HOTPLUG)
-#define SDE_TRANSB_CRC_DONE (1 << 5)
-#define SDE_TRANSB_CRC_ERR (1 << 4)
-#define SDE_TRANSB_FIFO_UNDER (1 << 3)
-#define SDE_TRANSA_CRC_DONE (1 << 2)
-#define SDE_TRANSA_CRC_ERR (1 << 1)
-#define SDE_TRANSA_FIFO_UNDER (1 << 0)
-#define SDE_TRANS_MASK (0x3f)
-
-/* south display engine interrupt: CPT - CNP */
-#define SDE_AUDIO_POWER_D_CPT (1 << 31)
-#define SDE_AUDIO_POWER_C_CPT (1 << 30)
-#define SDE_AUDIO_POWER_B_CPT (1 << 29)
-#define SDE_AUDIO_POWER_SHIFT_CPT 29
-#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
-#define SDE_AUXD_CPT (1 << 27)
-#define SDE_AUXC_CPT (1 << 26)
-#define SDE_AUXB_CPT (1 << 25)
-#define SDE_AUX_MASK_CPT (7 << 25)
-#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
-#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
-#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
-#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
-#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
-#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
-#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
- SDE_SDVOB_HOTPLUG_CPT | \
- SDE_PORTD_HOTPLUG_CPT | \
- SDE_PORTC_HOTPLUG_CPT | \
- SDE_PORTB_HOTPLUG_CPT)
-#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
- SDE_PORTD_HOTPLUG_CPT | \
- SDE_PORTC_HOTPLUG_CPT | \
- SDE_PORTB_HOTPLUG_CPT | \
- SDE_PORTA_HOTPLUG_SPT)
-#define SDE_GMBUS_CPT (1 << 17)
-#define SDE_ERROR_CPT (1 << 16)
-#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
-#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
-#define SDE_FDI_RXC_CPT (1 << 8)
-#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
-#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
-#define SDE_FDI_RXB_CPT (1 << 4)
-#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
-#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
-#define SDE_FDI_RXA_CPT (1 << 0)
-#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
- SDE_AUDIO_CP_REQ_B_CPT | \
- SDE_AUDIO_CP_REQ_A_CPT)
-#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
- SDE_AUDIO_CP_CHG_B_CPT | \
- SDE_AUDIO_CP_CHG_A_CPT)
-#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
- SDE_FDI_RXB_CPT | \
- SDE_FDI_RXA_CPT)
-
-/* south display engine interrupt: ICP/TGP/MTP */
-#define SDE_PICAINTERRUPT REG_BIT(31)
-#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
-#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
-#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
-#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
- SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
-#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
- SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
-
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
-#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
- SDEIER, \
- SDEIIR)
-
-#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1 << 31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
-
-/* digital port hotplug */
-#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
-#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
-#define BXT_DDIA_HPD_INVERT (1 << 27)
-#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
-#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
-#define PORTD_HOTPLUG_ENABLE (1 << 20)
-#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
-#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
-#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
-#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
-#define PORTC_HOTPLUG_ENABLE (1 << 12)
-#define BXT_DDIC_HPD_INVERT (1 << 11)
-#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
-#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
-#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
-#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
-#define PORTB_HOTPLUG_ENABLE (1 << 4)
-#define BXT_DDIB_HPD_INVERT (1 << 3)
-#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
-#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
-#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
-#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
-#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
- BXT_DDIB_HPD_INVERT | \
- BXT_DDIC_HPD_INVERT)
-
-#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
-#define PORTE_HOTPLUG_ENABLE (1 << 4)
-#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
-#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
-#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
-
-/* This register reuses the PCH_PORT_HOTPLUG register address. The
- * functionality covered by PCH_PORT_HOTPLUG is split into
- * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
- */
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
-#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
-
-#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
-#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
-#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
-
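A minimal sketch of consuming these per-pin nibbles, assuming the usual i915 types (enum hpd_pin, u32) and the _HPD_PIN_TC() mapping used above; illustrative only, not the driver's actual helper:

static bool icp_tc_hpd_long_pulse_sketch(u32 shotplug_ctl_tc, enum hpd_pin pin)
{
	/* each TC pin owns a 4-bit nibble: enable, plus 2 status bits */
	return shotplug_ctl_tc & ICP_TC_HPD_LONG_DETECT(pin);
}
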
-#define SHPD_FILTER_CNT _MMIO(0xc4038)
-#define SHPD_FILTER_CNT_500_ADJ 0x001D9
-#define SHPD_FILTER_CNT_250 0x000F8
-
-#define _PCH_DPLL_A 0xc6014
-#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
-
-#define _PCH_FPA0 0xc6040
-#define _PCH_FPB0 0xc6048
-#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define FP_CB_TUNE (0x3 << 22)
-
-#define _PCH_FPA1 0xc6044
-#define _PCH_FPB1 0xc604c
-#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
-
-#define PCH_DPLL_TEST _MMIO(0xc606c)
-
-#define PCH_DREF_CONTROL _MMIO(0xC6200)
-#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
-#define DREF_SSC_SOURCE_DISABLE (0 << 11)
-#define DREF_SSC_SOURCE_ENABLE (2 << 11)
-#define DREF_SSC_SOURCE_MASK (3 << 11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
-#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
-#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
-#define DREF_SSC4_DOWNSPREAD (0 << 6)
-#define DREF_SSC4_CENTERSPREAD (1 << 6)
-#define DREF_SSC1_DISABLE (0 << 1)
-#define DREF_SSC1_ENABLE (1 << 1)
-#define DREF_SSC4_DISABLE (0)
-#define DREF_SSC4_ENABLE (1)
-
-#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
-#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3 << 12)
-#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3 << 10)
-#define RAWCLK_FREQ_MASK 0x3ff
-#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
-#define CNP_RAWCLK_DIV(div) ((div) << 16)
-#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
-#define CNP_RAWCLK_DEN(den) ((den) << 26)
-#define ICP_RAWCLK_NUM(num) ((num) << 11)
-
-#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
-
-#define PCH_SSC4_PARMS _MMIO(0xc6210)
-#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
-
-#define PCH_DPLL_SEL _MMIO(0xc7000)
-#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
-#define TRANS_DPLLA_SEL(pipe) 0
-#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
-
-/* transcoder */
-#define _PCH_TRANS_HTOTAL_A 0xe0000
-#define _PCH_TRANS_HTOTAL_B 0xe1000
-#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
-#define TRANS_HTOTAL_SHIFT 16
-#define TRANS_HACTIVE_SHIFT 0
-
-#define _PCH_TRANS_HBLANK_A 0xe0004
-#define _PCH_TRANS_HBLANK_B 0xe1004
-#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
-#define TRANS_HBLANK_END_SHIFT 16
-#define TRANS_HBLANK_START_SHIFT 0
-
-#define _PCH_TRANS_HSYNC_A 0xe0008
-#define _PCH_TRANS_HSYNC_B 0xe1008
-#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
-#define TRANS_HSYNC_END_SHIFT 16
-#define TRANS_HSYNC_START_SHIFT 0
-
-#define _PCH_TRANS_VTOTAL_A 0xe000c
-#define _PCH_TRANS_VTOTAL_B 0xe100c
-#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
-#define TRANS_VTOTAL_SHIFT 16
-#define TRANS_VACTIVE_SHIFT 0
-
-#define _PCH_TRANS_VBLANK_A 0xe0010
-#define _PCH_TRANS_VBLANK_B 0xe1010
-#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
-#define TRANS_VBLANK_END_SHIFT 16
-#define TRANS_VBLANK_START_SHIFT 0
-
-#define _PCH_TRANS_VSYNC_A 0xe0014
-#define _PCH_TRANS_VSYNC_B 0xe1014
-#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
-#define TRANS_VSYNC_END_SHIFT 16
-#define TRANS_VSYNC_START_SHIFT 0
-
-#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
-#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
-#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
-
-#define _PCH_TRANSA_DATA_M1 0xe0030
-#define _PCH_TRANSB_DATA_M1 0xe1030
-#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
-
-#define _PCH_TRANSA_DATA_N1 0xe0034
-#define _PCH_TRANSB_DATA_N1 0xe1034
-#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
-
-#define _PCH_TRANSA_DATA_M2 0xe0038
-#define _PCH_TRANSB_DATA_M2 0xe1038
-#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
-
-#define _PCH_TRANSA_DATA_N2 0xe003c
-#define _PCH_TRANSB_DATA_N2 0xe103c
-#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
-
-#define _PCH_TRANSA_LINK_M1 0xe0040
-#define _PCH_TRANSB_LINK_M1 0xe1040
-#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
-
-#define _PCH_TRANSA_LINK_N1 0xe0044
-#define _PCH_TRANSB_LINK_N1 0xe1044
-#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
-
-#define _PCH_TRANSA_LINK_M2 0xe0048
-#define _PCH_TRANSB_LINK_M2 0xe1048
-#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
-
-#define _PCH_TRANSA_LINK_N2 0xe004c
-#define _PCH_TRANSB_LINK_N2 0xe104c
-#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
-
-/* Per-transcoder DIP controls (PCH) */
-#define _VIDEO_DIP_CTL_A 0xe0200
-#define _VIDEO_DIP_CTL_B 0xe1200
-#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
-
-#define _VIDEO_DIP_DATA_A 0xe0208
-#define _VIDEO_DIP_DATA_B 0xe1208
-#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
-
-#define _VIDEO_DIP_GCP_A 0xe0210
-#define _VIDEO_DIP_GCP_B 0xe1210
-#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define GCP_COLOR_INDICATION (1 << 2)
-#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
-#define GCP_AV_MUTE (1 << 0)
-
-/* Per-transcoder DIP controls (VLV) */
-#define _VLV_VIDEO_DIP_CTL_A 0x60200
-#define _VLV_VIDEO_DIP_CTL_B 0x61170
-#define _CHV_VIDEO_DIP_CTL_C 0x611f0
-#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_CTL_A, \
- _VLV_VIDEO_DIP_CTL_B, \
- _CHV_VIDEO_DIP_CTL_C)
-
-#define _VLV_VIDEO_DIP_DATA_A 0x60208
-#define _VLV_VIDEO_DIP_DATA_B 0x61174
-#define _CHV_VIDEO_DIP_DATA_C 0x611f4
-#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_DATA_A, \
- _VLV_VIDEO_DIP_DATA_B, \
- _CHV_VIDEO_DIP_DATA_C)
-
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
-#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
-#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
- _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
-
-/* Haswell DIP controls */
-#define _HSW_VIDEO_DIP_CTL_A 0x60200
-#define _HSW_VIDEO_DIP_CTL_B 0x61200
-#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
-
-#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
-#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
-#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
-#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
-#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
-#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
-#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
-#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
-#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
-#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
-#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-
-/* ADLP and later: */
-#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
-#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
-#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
- _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
-
-#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
-#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
-#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-
-#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
-#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
-#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
-#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
-#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
-#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
-#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
-#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
-#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
-#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
-
-#define _HSW_VIDEO_DIP_GCP_A 0x60210
-#define _HSW_VIDEO_DIP_GCP_B 0x61210
-#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
-
/* Icelake PPS_DATA and _ECC DIP Registers.
 * These are available for transcoders B, C and eDP.
 * The _A base is defined so as to reuse the _MMIO_TRANS2
 * definition, which offsets to the right location.
 */
-#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
-#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
-#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
-
-#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
-#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
-
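A small illustration of the _A-base trick the comment above describes, as a sketch only; the struct drm_i915_private argument and TRANSCODER_B follow the driver's usual conventions:

static i915_reg_t icl_pps_data_example(struct drm_i915_private *i915)
{
	/* resolves via _MMIO_TRANS2() to _MMIO(0x61350 + 4) for dword 1 */
	return ICL_VIDEO_DIP_PPS_DATA(i915, TRANSCODER_B, 1);
}
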
-#define _HSW_STEREO_3D_CTL_A 0x70020
-#define _HSW_STEREO_3D_CTL_B 0x71020
-#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
-#define S3D_ENABLE (1 << 31)
-
-#define _PCH_TRANSACONF 0xf0008
-#define _PCH_TRANSBCONF 0xf1008
-#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
-#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_ENABLE REG_BIT(31)
-#define TRANS_STATE_ENABLE REG_BIT(30)
-#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */
-#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */
-#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21)
-#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0)
-#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */
-#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3)
-#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */
-#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0)
-#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
-#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
-#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
-
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
@@ -3066,88 +1067,6 @@
#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
-#define PCH_DP_B _MMIO(0xe4100)
-#define PCH_DP_C _MMIO(0xe4200)
-#define PCH_DP_D _MMIO(0xe4300)
-
-/* CPT */
-#define _TRANS_DP_CTL_A 0xe0300
-#define _TRANS_DP_CTL_B 0xe1300
-#define _TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31)
-#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29)
-#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3)
-#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B)
-#define TRANS_DP_AUDIO_ONLY REG_BIT(26)
-#define TRANS_DP_ENH_FRAMING REG_BIT(18)
-#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9)
-#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0)
-#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1)
-#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2)
-#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4)
-#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3)
-
-#define _TRANS_DP2_CTL_A 0x600a0
-#define _TRANS_DP2_CTL_B 0x610a0
-#define _TRANS_DP2_CTL_C 0x620a0
-#define _TRANS_DP2_CTL_D 0x630a0
-#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
-#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
-#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
-#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
-
-#define _TRANS_DP2_VFREQHIGH_A 0x600a4
-#define _TRANS_DP2_VFREQHIGH_B 0x610a4
-#define _TRANS_DP2_VFREQHIGH_C 0x620a4
-#define _TRANS_DP2_VFREQHIGH_D 0x630a4
-#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
-#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
-#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
-
-#define _TRANS_DP2_VFREQLOW_A 0x600a8
-#define _TRANS_DP2_VFREQLOW_B 0x610a8
-#define _TRANS_DP2_VFREQLOW_C 0x620a8
-#define _TRANS_DP2_VFREQLOW_D 0x630a8
-#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
-
-#define _DP_MIN_HBLANK_CTL_A 0x600ac
-#define _DP_MIN_HBLANK_CTL_B 0x610ac
-#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
-
-/* SNB eDP training params */
-/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
-/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
-
-/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
-
-/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
-
#define VLV_PMWGICZ _MMIO(0x1300a4)
#define HSW_EDRAM_CAP _MMIO(0x120010)
@@ -3156,10 +1075,6 @@
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
-#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
-#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
-#define PIXEL_OVERLAP_CNT_SHIFT 30
-
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
@@ -3288,837 +1203,12 @@
*/
#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
-/*
- * HSW - ICL power wells
- *
- * Platforms have up to 3 power well control register sets, each set
- * controlling up to 16 power wells via a request/status HW flag tuple:
- * - main (HSW_PWR_WELL_CTL[1-4])
- * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
- * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
- * Each control register set consists of up to 4 registers used by different
- * sources that can request a power well to be enabled:
- * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
- * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
- * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
- * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
- */
-#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
-#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
-#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
-#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
-#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
-#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
-
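Per the comment above, each well is driven through a request/status bit pair; a hedged sketch of the enable flow (locking, timeouts and the wait-for-status loop omitted), using the uncore accessors the driver provides:

static void hsw_power_well_enable_sketch(struct intel_uncore *uncore,
					 int pw_idx)
{
	u32 val = intel_uncore_read(uncore, HSW_PWR_WELL_CTL2);

	/* set the DRIVER request bit; HW reflects completion in STATE */
	intel_uncore_write(uncore, HSW_PWR_WELL_CTL2,
			   val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	/* ...then poll for HSW_PWR_WELL_CTL_STATE(pw_idx) to latch */
}
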
-/* HSW/BDW power well */
-#define HSW_PW_CTL_IDX_GLOBAL 15
-
-/* SKL/BXT/GLK power wells */
-#define SKL_PW_CTL_IDX_PW_2 15
-#define SKL_PW_CTL_IDX_PW_1 14
-#define GLK_PW_CTL_IDX_AUX_C 10
-#define GLK_PW_CTL_IDX_AUX_B 9
-#define GLK_PW_CTL_IDX_AUX_A 8
-#define SKL_PW_CTL_IDX_DDI_D 4
-#define SKL_PW_CTL_IDX_DDI_C 3
-#define SKL_PW_CTL_IDX_DDI_B 2
-#define SKL_PW_CTL_IDX_DDI_A_E 1
-#define GLK_PW_CTL_IDX_DDI_A 1
-#define SKL_PW_CTL_IDX_MISC_IO 0
-
-/* ICL/TGL - power wells */
-#define TGL_PW_CTL_IDX_PW_5 4
-#define ICL_PW_CTL_IDX_PW_4 3
-#define ICL_PW_CTL_IDX_PW_3 2
-#define ICL_PW_CTL_IDX_PW_2 1
-#define ICL_PW_CTL_IDX_PW_1 0
-
-/* XE_LPD - power wells */
-#define XELPD_PW_CTL_IDX_PW_D 8
-#define XELPD_PW_CTL_IDX_PW_C 7
-#define XELPD_PW_CTL_IDX_PW_B 6
-#define XELPD_PW_CTL_IDX_PW_A 5
-
-#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
-#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
-#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
-#define TGL_PW_CTL_IDX_AUX_TBT6 14
-#define TGL_PW_CTL_IDX_AUX_TBT5 13
-#define TGL_PW_CTL_IDX_AUX_TBT4 12
-#define ICL_PW_CTL_IDX_AUX_TBT4 11
-#define TGL_PW_CTL_IDX_AUX_TBT3 11
-#define ICL_PW_CTL_IDX_AUX_TBT3 10
-#define TGL_PW_CTL_IDX_AUX_TBT2 10
-#define ICL_PW_CTL_IDX_AUX_TBT2 9
-#define TGL_PW_CTL_IDX_AUX_TBT1 9
-#define ICL_PW_CTL_IDX_AUX_TBT1 8
-#define TGL_PW_CTL_IDX_AUX_TC6 8
-#define XELPD_PW_CTL_IDX_AUX_E 8
-#define TGL_PW_CTL_IDX_AUX_TC5 7
-#define XELPD_PW_CTL_IDX_AUX_D 7
-#define TGL_PW_CTL_IDX_AUX_TC4 6
-#define ICL_PW_CTL_IDX_AUX_F 5
-#define TGL_PW_CTL_IDX_AUX_TC3 5
-#define ICL_PW_CTL_IDX_AUX_E 4
-#define TGL_PW_CTL_IDX_AUX_TC2 4
-#define ICL_PW_CTL_IDX_AUX_D 3
-#define TGL_PW_CTL_IDX_AUX_TC1 3
-#define ICL_PW_CTL_IDX_AUX_C 2
-#define ICL_PW_CTL_IDX_AUX_B 1
-#define ICL_PW_CTL_IDX_AUX_A 0
-
-#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
-#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
-#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
-#define XELPD_PW_CTL_IDX_DDI_E 8
-#define TGL_PW_CTL_IDX_DDI_TC6 8
-#define XELPD_PW_CTL_IDX_DDI_D 7
-#define TGL_PW_CTL_IDX_DDI_TC5 7
-#define TGL_PW_CTL_IDX_DDI_TC4 6
-#define ICL_PW_CTL_IDX_DDI_F 5
-#define TGL_PW_CTL_IDX_DDI_TC3 5
-#define ICL_PW_CTL_IDX_DDI_E 4
-#define TGL_PW_CTL_IDX_DDI_TC2 4
-#define ICL_PW_CTL_IDX_DDI_D 3
-#define TGL_PW_CTL_IDX_DDI_TC1 3
-#define ICL_PW_CTL_IDX_DDI_C 2
-#define ICL_PW_CTL_IDX_DDI_B 1
-#define ICL_PW_CTL_IDX_DDI_A 0
-
-/* HSW - power well misc debug registers */
-#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
-#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
-#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
-#define HSW_PWR_WELL_FORCE_ON (1 << 19)
-#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
-
-/* SKL Fuse Status */
-enum skl_power_gate {
- SKL_PG0,
- SKL_PG1,
- SKL_PG2,
- ICL_PG3,
- ICL_PG4,
-};
-
-#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
-/*
- * PG0 is HW controlled, so doesn't have a corresponding power well control knob
- * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2
- */
-#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \
- ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1)
-/*
- * PG0 is HW controlled, so doesn't have a corresponding power well control knob
- * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4
- */
-#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
-#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-
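A short sketch tying the index-to-power-gate mapping to the fuse status bits above (illustrative; the driver's real check also accounts for PG0 being fully HW controlled):

static bool skl_pg_dist_done_sketch(u32 fuse_status, int pw_idx)
{
	enum skl_power_gate pg = SKL_PW_CTL_IDX_TO_PG(pw_idx);

	return fuse_status & SKL_FUSE_PG_DIST_STATUS(pg);
}
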
-/* Per-pipe DDI Function Control */
-#define _TRANS_DDI_FUNC_CTL_A 0x60400
-#define _TRANS_DDI_FUNC_CTL_B 0x61400
-#define _TRANS_DDI_FUNC_CTL_C 0x62400
-#define _TRANS_DDI_FUNC_CTL_D 0x63400
-#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
-#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
-#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
-#define TRANS_DDI_FUNC_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
-
-#define TRANS_DDI_FUNC_ENABLE (1 << 31)
-/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_SHIFT 28
-#define TGL_TRANS_DDI_PORT_SHIFT 27
-#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
-#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
-#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
-#define TRANS_DDI_BPC_MASK (7 << 20)
-#define TRANS_DDI_BPC_8 (0 << 20)
-#define TRANS_DDI_BPC_10 (1 << 20)
-#define TRANS_DDI_BPC_6 (2 << 20)
-#define TRANS_DDI_BPC_12 (3 << 20)
-#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18)
-#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
-#define TRANS_DDI_PVSYNC (1 << 17)
-#define TRANS_DDI_PHSYNC (1 << 16)
-#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
-#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
-#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
-#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
-#define TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(12)
-#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
-#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
- REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
-#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
-#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
-#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
-#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
-#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
-#define TRANS_DDI_BFI_ENABLE (1 << 4)
-#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
-#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
-#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
-#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
- | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
- | TRANS_DDI_HDMI_SCRAMBLING)
-
-#define _TRANS_DDI_FUNC_CTL2_A 0x60404
-#define _TRANS_DDI_FUNC_CTL2_B 0x61404
-#define _TRANS_DDI_FUNC_CTL2_C 0x62404
-#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
-#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
-#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
-#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
-#define CMTG_SECONDARY_MODE REG_BIT(3)
-#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
-
-#define TRANS_CMTG_CHICKEN _MMIO(0x6fa90)
-#define DISABLE_DPT_CLK_GATING REG_BIT(1)
-
-/* DisplayPort Transport Control */
-#define _DP_TP_CTL_A 0x64040
-#define _DP_TP_CTL_B 0x64140
-#define _TGL_DP_TP_CTL_A 0x60540
-#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
-#define DP_TP_CTL_ENABLE REG_BIT(31)
-#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
-#define DP_TP_CTL_MODE_MASK REG_BIT(27)
-#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
-#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
-#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
-#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
-#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
-#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
-#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
-#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
-
-/* DisplayPort Transport Status */
-#define _DP_TP_STATUS_A 0x64044
-#define _DP_TP_STATUS_B 0x64144
-#define _TGL_DP_TP_STATUS_A 0x60544
-#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
-#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
-#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
-#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
-#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
-#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
-#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
-
-/* DDI Buffer Control */
-#define _DDI_BUF_CTL_A 0x64000
-#define _DDI_BUF_CTL_B 0x64100
-/* Known as DDI_CTL_DE in MTL+ */
-#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE REG_BIT(31)
-#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
-#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
-#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
-#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
-#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
-#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
-#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
-#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
-#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
-#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
-#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
-#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
-#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
- (symbols))
-#define DDI_BUF_IS_IDLE REG_BIT(7)
-#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
-#define DDI_A_4_LANES REG_BIT(4)
-#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
- ((width) == 3 ? 4 : (width) - 1))
-#define DDI_PORT_WIDTH_SHIFT 1
-#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
-
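The lane-count field uses a non-linear encoding: widths 1, 2 and 4 map to 0, 1 and 3, while the 3-lane case takes the otherwise-unused value 4. A worked example, as a sketch:

static u32 ddi_port_width_example(void)
{
	/* DDI_PORT_WIDTH(3) == REG_FIELD_PREP(GENMASK(3, 1), 4) == 0x8 */
	return DDI_PORT_WIDTH(3);
}
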
-/* DDI Buffer Translations */
-#define _DDI_BUF_TRANS_A 0x64E00
-#define _DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
-#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
-#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
-
-/* DDI DP Compliance Control */
-#define _DDI_DP_COMP_CTL_A 0x605F0
-#define _DDI_DP_COMP_CTL_B 0x615F0
-#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
-#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
-#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
-#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
-#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
-#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
-#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
-#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
-#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
-
-/* DDI DP Compliance Pattern */
-#define _DDI_DP_COMP_PAT_A 0x605F4
-#define _DDI_DP_COMP_PAT_B 0x615F4
-#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
-
-/* The Sideband Interface (SBI) is programmed indirectly via
- * SBI_ADDR, which contains the register offset, and SBI_DATA,
- * which contains the payload. */
-#define SBI_ADDR _MMIO(0xC6000)
-#define SBI_DATA _MMIO(0xC6004)
-#define SBI_CTL_STAT _MMIO(0xC6008)
-#define SBI_CTL_DEST_ICLK (0x0 << 16)
-#define SBI_CTL_DEST_MPHY (0x1 << 16)
-#define SBI_CTL_OP_IORD (0x2 << 8)
-#define SBI_CTL_OP_IOWR (0x3 << 8)
-#define SBI_CTL_OP_CRRD (0x6 << 8)
-#define SBI_CTL_OP_CRWR (0x7 << 8)
-#define SBI_RESPONSE_FAIL (0x1 << 1)
-#define SBI_RESPONSE_SUCCESS (0x0 << 1)
-#define SBI_BUSY (0x1 << 0)
-#define SBI_READY (0x0 << 0)
-
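A hedged sketch of the indirect read sequence the comment above describes (the real helper also waits for SBI_BUSY to clear before starting and checks SBI_RESPONSE_FAIL; locking and timeouts omitted):

static u32 sbi_read_sketch(struct intel_uncore *uncore, u16 offset)
{
	intel_uncore_write(uncore, SBI_ADDR, (u32)offset << 16);
	intel_uncore_write(uncore, SBI_CTL_STAT,
			   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);
	/* poll SBI_CTL_STAT until SBI_BUSY clears, then fetch the payload */
	return intel_uncore_read(uncore, SBI_DATA);
}
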
-/* SBI offsets */
-#define SBI_SSCDIVINTPHASE 0x0200
-#define SBI_SSCDIVINTPHASE6 0x0600
-#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
-#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
-#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
-#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
-#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
-#define SBI_SSCDITHPHASE 0x0204
-#define SBI_SSCCTL 0x020c
-#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_PATHALT (1 << 3)
-#define SBI_SSCCTL_DISABLE (1 << 0)
-#define SBI_SSCAUXDIV6 0x0610
-#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
-#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
-#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
-#define SBI_DBUFF0 0x2a00
-#define SBI_GEN0 0x1f00
-#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
-
-/* LPT PIXCLK_GATE */
-#define PIXCLK_GATE _MMIO(0xC6020)
-#define PIXCLK_GATE_UNGATE (1 << 0)
-#define PIXCLK_GATE_GATE (0 << 0)
-
-/* SPLL */
-#define SPLL_CTL _MMIO(0x46020)
-#define SPLL_PLL_ENABLE (1 << 31)
-#define SPLL_REF_BCLK (0 << 28)
-#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
-#define SPLL_REF_NON_SSC_HSW (2 << 28)
-#define SPLL_REF_PCH_SSC_BDW (2 << 28)
-#define SPLL_REF_LCPLL (3 << 28)
-#define SPLL_REF_MASK (3 << 28)
-#define SPLL_FREQ_810MHz (0 << 26)
-#define SPLL_FREQ_1350MHz (1 << 26)
-#define SPLL_FREQ_2700MHz (2 << 26)
-#define SPLL_FREQ_MASK (3 << 26)
-
-/* WRPLL */
-#define _WRPLL_CTL1 0x46040
-#define _WRPLL_CTL2 0x46060
-#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define WRPLL_PLL_ENABLE (1 << 31)
-#define WRPLL_REF_BCLK (0 << 28)
-#define WRPLL_REF_PCH_SSC (1 << 28)
-#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
-#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
-#define WRPLL_REF_LCPLL (3 << 28)
-#define WRPLL_REF_MASK (3 << 28)
-/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
-#define WRPLL_DIVIDER_REF_MASK (0xff)
-#define WRPLL_DIVIDER_POST(x) ((x) << 8)
-#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
-#define WRPLL_DIVIDER_POST_SHIFT 8
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
-#define WRPLL_DIVIDER_FB_SHIFT 16
-#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
-
-/* Port clock selection */
-#define _PORT_CLK_SEL_A 0x46100
-#define _PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
-#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
-#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
-#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
-#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
-#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
-#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
-#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
-#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
-
-/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
-#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
-#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
-#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
-#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
-#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
-#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
-#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
-#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
-
-/* Transcoder clock selection */
-#define _TRANS_CLK_SEL_A 0x46140
-#define _TRANS_CLK_SEL_B 0x46144
-#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
-/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
-#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
-#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
-#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
-
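The +1 in the port encoding keeps the value 0 reserved for "disabled"; a minimal sketch of routing a transcoder to a port clock, using the same write helper as the sketches above:

static void trans_clk_sel_sketch(struct intel_uncore *uncore)
{
	/* route transcoder A to the PORT_B clock; writing 0 would disable it */
	intel_uncore_write(uncore, TRANS_CLK_SEL(TRANSCODER_A),
			   TRANS_CLK_SEL_PORT(PORT_B));
}
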
-
-#define CDCLK_FREQ _MMIO(0x46200)
-
-#define _TRANSA_MSA_MISC 0x60410
-#define _TRANSB_MSA_MISC 0x61410
-#define _TRANSC_MSA_MISC 0x62410
-#define _TRANS_EDP_MSA_MISC 0x6f410
-#define TRANS_MSA_MISC(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
-/* See DP_MSA_MISC_* for the bit definitions */
-
-#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
-#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
-#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
-#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
-#define TRANS_SET_CONTEXT_LATENCY(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
-#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
-#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
-
-/* LCPLL Control */
-#define LCPLL_CTL _MMIO(0x130040)
-#define LCPLL_PLL_DISABLE (1 << 31)
-#define LCPLL_PLL_LOCK (1 << 30)
-#define LCPLL_REF_NON_SSC (0 << 28)
-#define LCPLL_REF_BCLK (2 << 28)
-#define LCPLL_REF_PCH_SSC (3 << 28)
-#define LCPLL_REF_MASK (3 << 28)
-#define LCPLL_CLK_FREQ_MASK (3 << 26)
-#define LCPLL_CLK_FREQ_450 (0 << 26)
-#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
-#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
-#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
-#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
-#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
-#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
-#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
-#define LCPLL_CD_SOURCE_FCLK (1 << 21)
-#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
-
-/*
- * SKL Clocks
- */
-/* CDCLK_CTL */
-#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
-#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
-#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
-#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
-#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
-#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
-#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
-#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
-#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
-#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
-#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
-#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
-#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
-#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
-#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
-#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
-#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
-
-/* CDCLK_SQUASH_CTL */
-#define CDCLK_SQUASH_CTL _MMIO(0x46008)
-#define CDCLK_SQUASH_ENABLE REG_BIT(31)
-#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
-#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
-#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
-#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
-
-/* LCPLL_CTL */
-#define LCPLL1_CTL _MMIO(0x46010)
-#define LCPLL2_CTL _MMIO(0x46014)
-#define LCPLL_PLL_ENABLE (1 << 31)
-
-/* DPLL control1 */
-#define DPLL_CTRL1 _MMIO(0x6C058)
-#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
-#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
-#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
-#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
-#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
-#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
-#define DPLL_CTRL1_LINK_RATE_2700 0
-#define DPLL_CTRL1_LINK_RATE_1350 1
-#define DPLL_CTRL1_LINK_RATE_810 2
-#define DPLL_CTRL1_LINK_RATE_1620 3
-#define DPLL_CTRL1_LINK_RATE_1080 4
-#define DPLL_CTRL1_LINK_RATE_2160 5
-
-/* DPLL control2 */
-#define DPLL_CTRL2 _MMIO(0x6C05C)
-#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
-#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
-#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
-#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
-#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
-
-/* DPLL Status */
-#define DPLL_STATUS _MMIO(0x6C060)
-#define DPLL_LOCK(id) (1 << ((id) * 8))
-
-/* DPLL cfg */
-#define _DPLL1_CFGCR1 0x6C040
-#define _DPLL2_CFGCR1 0x6C048
-#define _DPLL3_CFGCR1 0x6C050
-#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
-#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
-#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
-#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
-#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
-
-#define _DPLL1_CFGCR2 0x6C044
-#define _DPLL2_CFGCR2 0x6C04C
-#define _DPLL3_CFGCR2 0x6C054
-#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
-#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
-#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
-#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
-#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
-#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
-#define DPLL_CFGCR2_KDIV_5 (0 << 5)
-#define DPLL_CFGCR2_KDIV_2 (1 << 5)
-#define DPLL_CFGCR2_KDIV_3 (2 << 5)
-#define DPLL_CFGCR2_KDIV_1 (3 << 5)
-#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
-#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
-#define DPLL_CFGCR2_PDIV_1 (0 << 2)
-#define DPLL_CFGCR2_PDIV_2 (1 << 2)
-#define DPLL_CFGCR2_PDIV_3 (2 << 2)
-#define DPLL_CFGCR2_PDIV_7 (4 << 2)
-#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
-#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
-
-/* ICL Clocks */
-#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
-#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
- (tc_port) + 12 : \
- (tc_port) - TC_PORT_4 + 21))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
- (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
- ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-
-/*
- * DG1 Clocks
- * The first register controls phys A and B, while the second register
- * controls phys C and D. The bits in these registers are the
- * same, but refer to different phys.
- */
-#define _DG1_DPCLKA_CFGCR0 0x164280
-#define _DG1_DPCLKA1_CFGCR0 0x16C280
-#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
-#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
-#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
- _DG1_DPCLKA_CFGCR0, \
- _DG1_DPCLKA1_CFGCR0)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-
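A worked example of the pairing described in the comment: PHY C has index 2, so DG1_DPCLKA_CFGCR0(2) selects the second register block, and _DG1_DPCLKA_PHY_IDX(2) == 0 places its clock-off bit at bit 10 there. As a sketch, gating that clock with the read-modify-write helper:

static void dg1_ddi_clk_off_sketch(struct intel_uncore *uncore, enum phy phy)
{
	intel_uncore_rmw(uncore, DG1_DPCLKA_CFGCR0(phy),
			 0, DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
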
-/* ADLS Clocks */
-#define _ADLS_DPCLKA_CFGCR0 0x164280
-#define _ADLS_DPCLKA_CFGCR1 0x1642BC
-#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
- _ADLS_DPCLKA_CFGCR0, \
- _ADLS_DPCLKA_CFGCR1)
-#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
-/* ADLS DPCLKA_CFGCR0 DDI mask */
-#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
-#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
-#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
-/* ADLS DPCLKA_CFGCR1 DDI mask */
-#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
-#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
-#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
- ADLS_DPCLKA_DDIA_SEL_MASK, \
- ADLS_DPCLKA_DDIB_SEL_MASK, \
- ADLS_DPCLKA_DDII_SEL_MASK, \
- ADLS_DPCLKA_DDIJ_SEL_MASK, \
- ADLS_DPCLKA_DDIK_SEL_MASK)
-
-/* ICL PLL */
-#define _DPLL0_ENABLE 0x46010
-#define _DPLL1_ENABLE 0x46014
-#define _ADLS_DPLL2_ENABLE 0x46018
-#define _ADLS_DPLL3_ENABLE 0x46030
-#define PLL_ENABLE REG_BIT(31)
-#define PLL_LOCK REG_BIT(30)
-#define PLL_POWER_ENABLE REG_BIT(27)
-#define PLL_POWER_STATE REG_BIT(26)
-#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
-
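A worked example of the two-range pick above: indices 0..2 fall in the evenly spaced 0x46010/0x46014/0x46018 run, while index 3 (the ADL-S DPLL3) jumps to 0x46030. Sketch:

static i915_reg_t icl_dpll_enable_example(void)
{
	/* resolves to _MMIO(_ADLS_DPLL3_ENABLE), i.e. 0x46030 */
	return ICL_DPLL_ENABLE(3);
}
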
-#define _DG2_PLL3_ENABLE 0x4601C
-
-#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
-
-#define TBT_PLL_ENABLE _MMIO(0x46020)
-
-#define _MG_PLL1_ENABLE 0x46030
-#define _MG_PLL2_ENABLE 0x46034
-#define _MG_PLL3_ENABLE 0x46038
-#define _MG_PLL4_ENABLE 0x4603C
-/* Bits are the same as _DPLL0_ENABLE */
-#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
- _MG_PLL2_ENABLE)
-
-/* DG1 PLL */
-#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _DPLL0_ENABLE, _DPLL1_ENABLE, \
- _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
-
-/* ADL-P Type C PLL */
-#define PORTTC1_PLL_ENABLE 0x46038
-#define PORTTC2_PLL_ENABLE 0x46040
-#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
- PORTTC1_PLL_ENABLE, \
- PORTTC2_PLL_ENABLE)
-
-#define _ICL_DPLL0_CFGCR0 0x164000
-#define _ICL_DPLL1_CFGCR0 0x164080
-#define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
- _ICL_DPLL1_CFGCR0)
-#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
-#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
-#define DPLL_CFGCR0_SSC_ENABLE_ICL (1 << 25)
-#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
-#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
-#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
-#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
-#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
-#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
-#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
-#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
-#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
-#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
-#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
-
-#define _ICL_DPLL0_CFGCR1 0x164004
-#define _ICL_DPLL1_CFGCR1 0x164084
-#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
- _ICL_DPLL1_CFGCR1)
-#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
-#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
-#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
-#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
-#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
-#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
-#define DPLL_CFGCR1_KDIV_SHIFT (6)
-#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
-#define DPLL_CFGCR1_KDIV_1 (1 << 6)
-#define DPLL_CFGCR1_KDIV_2 (2 << 6)
-#define DPLL_CFGCR1_KDIV_3 (4 << 6)
-#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
-#define DPLL_CFGCR1_PDIV_SHIFT (2)
-#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
-#define DPLL_CFGCR1_PDIV_2 (1 << 2)
-#define DPLL_CFGCR1_PDIV_3 (2 << 2)
-#define DPLL_CFGCR1_PDIV_5 (4 << 2)
-#define DPLL_CFGCR1_PDIV_7 (8 << 2)
-#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
-#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
-#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
-
-#define _TGL_DPLL0_CFGCR0 0x164284
-#define _TGL_DPLL1_CFGCR0 0x16428C
-#define _TGL_TBTPLL_CFGCR0 0x16429C
-#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
-#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0)
-
-#define _TGL_DPLL0_DIV0 0x164B00
-#define _TGL_DPLL1_DIV0 0x164C00
-#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
-#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
-#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
-
-#define _TGL_DPLL0_CFGCR1 0x164288
-#define _TGL_DPLL1_CFGCR1 0x164290
-#define _TGL_TBTPLL_CFGCR1 0x1642A0
-#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
-#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1)
-
-#define _DG1_DPLL2_CFGCR0 0x16C284
-#define _DG1_DPLL3_CFGCR0 0x16C28C
-#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
-
-#define _DG1_DPLL2_CFGCR1 0x16C288
-#define _DG1_DPLL3_CFGCR1 0x16C290
-#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
-
-/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
-#define _ADLS_DPLL4_CFGCR0 0x164294
-#define _ADLS_DPLL3_CFGCR0 0x1642C0
-#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
- _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
-
-#define _ADLS_DPLL4_CFGCR1 0x164298
-#define _ADLS_DPLL3_CFGCR1 0x1642C4
-#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
- _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
- _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
-
-/* BXT display engine PLL */
-#define BXT_DE_PLL_CTL _MMIO(0x6d000)
-#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
-#define BXT_DE_PLL_RATIO_MASK 0xff
-
-#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
-#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
-#define BXT_DE_PLL_LOCK (1 << 30)
-#define BXT_DE_PLL_FREQ_REQ (1 << 23)
-#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
-#define ICL_CDCLK_PLL_RATIO(x) (x)
-#define ICL_CDCLK_PLL_RATIO_MASK 0xff
-
-/* GEN9 DC */
-#define DC_STATE_EN _MMIO(0x45504)
-#define DC_STATE_DISABLE 0
-#define DC_STATE_EN_DC3CO REG_BIT(30)
-#define DC_STATE_DC3CO_STATUS REG_BIT(29)
-#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
-#define HOLD_PHY_PG1_LATCH REG_BIT(20)
-#define DC_STATE_EN_UPTO_DC5 (1 << 0)
-#define DC_STATE_EN_DC9 (1 << 3)
-#define DC_STATE_EN_UPTO_DC6 (2 << 0)
-#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
-
-#define DC_STATE_DEBUG _MMIO(0x45520)
-#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
-#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-
-#define D_COMP_BDW _MMIO(0x138144)
-
-/* Pipe WM_LINETIME - watermark line time */
-#define _WM_LINETIME_A 0x45270
-#define _WM_LINETIME_B 0x45274
-#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
-#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
-#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
-#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
-#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
-
-/* SFUSE_STRAP */
-#define SFUSE_STRAP _MMIO(0xc2014)
-#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
-#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
-#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
-#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
-#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
-#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
-#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
-#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
-
-/* Gen4+ Timestamp and Pipe Frame time stamp registers */
-#define GEN4_TIMESTAMP _MMIO(0x2358)
-#define ILK_TIMESTAMP_HI _MMIO(0x70070)
-#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
-
#define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074)
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
-/* g4x+, except vlv/chv! */
-#define _PIPE_FRMTMSTMP_A 0x70048
-#define _PIPE_FRMTMSTMP_B 0x71048
-#define PIPE_FRMTMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
-
-/* g4x+, except vlv/chv! */
-#define _PIPE_FLIPTMSTMP_A 0x7004C
-#define _PIPE_FLIPTMSTMP_B 0x7104C
-#define PIPE_FLIPTMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
-
-/* tgl+ */
-#define _PIPE_FLIPDONETMSTMP_A 0x70054
-#define _PIPE_FLIPDONETMSTMP_B 0x71054
-#define PIPE_FLIPDONETIMSTMP(pipe) \
- _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
-
-#define _VLV_PIPE_MSA_MISC_A 0x70048
-#define VLV_PIPE_MSA_MISC(__display, pipe) \
- _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
-#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
-#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
-
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
@@ -4133,45 +1223,6 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define _ICL_PHY_MISC_A 0x64C00
-#define _ICL_PHY_MISC_B 0x64C04
-#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
-#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
-#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
- ICL_PHY_MISC(port))
-#define ICL_PHY_MISC_MUX_DDID (1 << 28)
-#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
-#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
-
-#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
-#define MODULAR_FIA_MASK (1 << 4)
-#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
-#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
-#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
-
-#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
-
-#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
-
-#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
-#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
-#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
-#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
-
-#define _TCSS_DDI_STATUS_1 0x161500
-#define _TCSS_DDI_STATUS_2 0x161504
-#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
- _TCSS_DDI_STATUS_1, \
- _TCSS_DDI_STATUS_2))
-#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
-#define TCSS_DDI_STATUS_READY REG_BIT(2)
-#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
-#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
-
#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
@@ -4180,37 +1231,11 @@ enum skl_power_gate {
#define OROM_OFFSET _MMIO(0x1020c0)
#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
-#define CLKREQ_POLICY _MMIO(0x101038)
-#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
-
-#define CLKGATE_DIS_MISC _MMIO(0x46534)
-#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
-
-#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
-#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
-#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
-#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
-
-#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
-#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
-#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
-#define MTL_DPFC_GATING_DIS REG_BIT(6)
-
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
-#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
-#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
-#define MTL_TRCD_MASK REG_GENMASK(31, 24)
-#define MTL_TRP_MASK REG_GENMASK(23, 16)
-#define MTL_DCLK_MASK REG_GENMASK(15, 0)
-
-#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4)
-#define MTL_TRAS_MASK REG_GENMASK(16, 8)
-#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
-
#define MTL_MEDIA_GSI_BASE 0x380000
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c3d27eadc0a7..b9a2b2194c8f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -2184,7 +2184,7 @@ void i915_request_show(struct drm_printer *m,
const char *prefix,
int indent)
{
- const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ const char __rcu *timeline;
char buf[80] = "";
int x = 0;
@@ -2220,6 +2220,8 @@ void i915_request_show(struct drm_printer *m,
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+ rcu_read_lock();
+ timeline = dma_fence_timeline_name((struct dma_fence *)&rq->fence);
drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
prefix, indent, " ",
queue_status(rq),
@@ -2228,7 +2230,8 @@ void i915_request_show(struct drm_printer *m,
fence_status(rq),
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
+ rcu_dereference(timeline));
+ rcu_read_unlock();
}
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a4902ee08b6e..73e89b168fc3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -430,16 +430,22 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
timer);
struct i915_sw_fence *fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
fence = xchg(&cb->base.fence, NULL);
if (!fence)
return;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(cb->dma);
+ timeline = dma_fence_timeline_name(cb->dma);
pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
- cb->dma->ops->get_driver_name(cb->dma),
- cb->dma->ops->get_timeline_name(cb->dma),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ rcu_read_unlock();
i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
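The two conversions above share one rule: dma_fence_driver_name() and
dma_fence_timeline_name() return RCU-protected strings, so rcu_read_lock()
must cover both the lookup and the last use of the returned pointer. A
minimal sketch of the pattern, assuming only a valid fence pointer:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

static void log_fence_names(struct dma_fence *f)
{
	const char __rcu *driver;
	const char __rcu *timeline;

	rcu_read_lock();
	driver = dma_fence_driver_name(f);
	timeline = dma_fence_timeline_name(f);
	/* The strings may be freed once the RCU read lock is dropped,
	 * so consume them (here: print) before unlocking.
	 */
	pr_info("fence %s:%s:%llu\n",
		rcu_dereference(driver), rcu_dereference(timeline),
		f->seqno);
	rcu_read_unlock();
}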
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index f45bd6b6cede..4c02a04be681 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,6 +5,8 @@
#include <linux/vga_switcheroo.h>
+#include "display/intel_display_core.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_switcheroo.h"
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 632e316f8b05..25e97031d76e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1607,6 +1607,26 @@ err_rpm:
return err;
}
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return err;
+}
+
static void flush_idle_contexts(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
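The out-of-lined i915_vma_pin() above is the canonical ww transaction shape:
lock under a ww context, attempt the operation, back off and retry on
-EDEADLK. The same shape extends to multiple objects; a minimal sketch with
a hypothetical two-object helper:

static int lock_two_objects(struct drm_i915_gem_object *a,
			    struct drm_i915_gem_object *b)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible waits */
retry:
	err = i915_gem_object_lock(a, &ww);
	if (!err)
		err = i915_gem_object_lock(b, &ww);
	if (err == -EDEADLK) {
		/* Releases all locks taken so far, then sleeps on the
		 * contended one before the retry.
		 */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}

	/* ... operate on a and b while both are locked ... */

	i915_gem_ww_ctx_fini(&ww);	/* drops any locks still held */
	return err;
}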
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 6a6be8048aa8..0f9eee6d18d2 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -289,26 +289,8 @@ int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct i915_gem_ww_ctx ww;
- int err;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- err = i915_gem_object_lock(vma->obj, &ww);
- if (!err)
- err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
- if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&ww);
- if (!err)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
-
- return err;
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsigned int flags);
@@ -353,6 +335,11 @@ static inline bool i915_node_color_differs(const struct drm_mm_node *node,
return drm_mm_node_allocated(node) && node->color != color;
}
+static inline void __iomem *i915_vma_get_iomap(struct i915_vma *vma)
+{
+ return READ_ONCE(vma->iomap);
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 387b26400169..f86a3629ae9e 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -27,6 +27,7 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_display.h"
+#include "display/intel_display_core.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
@@ -37,7 +38,7 @@
#include "i915_reg.h"
#include "intel_clock_gating.h"
#include "intel_mchbar_regs.h"
-#include "vlv_sideband.h"
+#include "vlv_iosf_sb.h"
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
@@ -201,6 +202,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *i915)
static void cpt_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
u32 val;
@@ -220,7 +222,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (i915->display.vbt.fdi_rx_polarity_inverted)
+ if (display->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index d581a9d2c063..87ac4446d306 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -11,6 +11,7 @@
#include "display/intel_color_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
+#include "display/intel_display_core.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -20,6 +21,7 @@
#include "display/intel_lvds_regs.h"
#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -32,6 +34,7 @@
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "intel_gvt.h"
#include "intel_mchbar_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 3db2ba439bb5..81da75108c60 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -110,13 +110,12 @@ int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
}
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
+ int timeout_ms)
{
int err;
mutex_lock(&uncore->i915->sb_lock);
- err = __snb_pcode_rw(uncore, mbox, &val, NULL,
- fast_timeout_us, slow_timeout_ms, false);
+ err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false);
mutex_unlock(&uncore->i915->sb_lock);
if (err) {
@@ -273,3 +272,27 @@ int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u3
return err;
}
+
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return snb_pcode_read(&i915->uncore, mbox, val, val1);
+}
+
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
+}
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
+ timeout_base_ms);
+}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 8d2198e29422..c91a821a88d4 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
+struct drm_device;
struct intel_uncore;
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
-int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms);
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, int timeout_ms);
#define snb_pcode_write(uncore, mbox, val) \
- snb_pcode_write_timeout(uncore, mbox, val, 500, 0)
+ snb_pcode_write_timeout((uncore), (mbox), (val), 1)
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
@@ -27,4 +27,13 @@ int intel_pcode_init(struct intel_uncore *uncore);
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
+/* Helpers with drm device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+#define intel_pcode_write(drm, mbox, val) \
+ intel_pcode_write_timeout((drm), (mbox), (val), 1)
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
#endif /* _INTEL_PCODE_H */
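The drm_device-based wrappers let display code issue pcode transactions
without reaching into drm_i915_private; intel_dram.c below is converted to
them. A minimal caller sketch (hypothetical helper, mailbox encoding taken
from the icl_pcode_read_mem_global_info() hunk below):

static int read_mem_global_info(struct drm_device *drm, u32 *val)
{
	return intel_pcode_read(drm,
				ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				val, NULL);
}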
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d9f4c410546..7ce3e6de0c19 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -59,7 +59,9 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
+ if (!rpm->debug.class)
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT,
+ "intel_runtime_pm");
}
static intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
deleted file mode 100644
index 41e85ac773dc..000000000000
--- a/drivers/gpu/drm/i915/intel_sbi.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2013-2021 Intel Corporation
- *
- * LPT/WPT IOSF sideband.
- */
-
-#include "i915_drv.h"
-#include "intel_sbi.h"
-#include "i915_reg.h"
-
-/* SBI access */
-static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination,
- u32 *val, bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 cmd;
-
- lockdep_assert_held(&i915->sbi_lock);
-
- if (intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to become ready\n");
- return -EBUSY;
- }
-
- intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
- intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
-
- if (destination == SBI_ICLK)
- cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
- else
- cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- if (!is_read)
- cmd |= BIT(8);
- intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100, 100, &cmd)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to complete read\n");
- return -ETIMEDOUT;
- }
-
- if (cmd & SBI_RESPONSE_FAIL) {
- drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
- return -ENXIO;
- }
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, SBI_DATA);
-
- return 0;
-}
-
-void intel_sbi_lock(struct drm_i915_private *i915)
-{
- mutex_lock(&i915->sbi_lock);
-}
-
-void intel_sbi_unlock(struct drm_i915_private *i915)
-{
- mutex_unlock(&i915->sbi_lock);
-}
-
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination)
-{
- u32 result = 0;
-
- intel_sbi_rw(i915, reg, destination, &result, true);
-
- return result;
-}
-
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination)
-{
- intel_sbi_rw(i915, reg, destination, &value, false);
-}
-
-void intel_sbi_init(struct drm_i915_private *i915)
-{
- mutex_init(&i915->sbi_lock);
-}
-
-void intel_sbi_fini(struct drm_i915_private *i915)
-{
- mutex_destroy(&i915->sbi_lock);
-}
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
deleted file mode 100644
index 85161a4f13b8..000000000000
--- a/drivers/gpu/drm/i915/intel_sbi.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _INTEL_SBI_H_
-#define _INTEL_SBI_H_
-
-#include <linux/types.h>
-
-struct drm_i915_private;
-
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
-};
-
-void intel_sbi_init(struct drm_i915_private *i915);
-void intel_sbi_fini(struct drm_i915_private *i915);
-void intel_sbi_lock(struct drm_i915_private *i915);
-void intel_sbi_unlock(struct drm_i915_private *i915);
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-
-#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 48a10ff80148..c8e29fd72290 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,8 @@
#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
+#include "display/intel_display_core.h"
+
#include "gt/intel_gt.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
@@ -2642,7 +2644,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm,
"Failed to wait for Driver-FLR bit to clear! %d\n",
@@ -2657,7 +2659,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Wait for hardware teardown to complete */
ret = intel_wait_for_register_fw(uncore, GU_CNTL,
DRIVERFLR, 0,
- flr_timeout_ms);
+ flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
@@ -2666,7 +2668,7 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Wait for hardware/firmware re-init to complete */
ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
DRIVERFLR_STATUS, DRIVERFLR_STATUS,
- flr_timeout_ms);
+ flr_timeout_ms, NULL);
if (ret) {
drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e39582950627..6048b99b96cb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -313,10 +313,11 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms)
+ unsigned int timeout_ms,
+ u32 *out_value)
{
return __intel_wait_for_register_fw(uncore, reg, mask, value,
- 2, timeout_ms, NULL);
+ 2, timeout_ms, out_value);
}
#define IS_GSI_REG(reg) ((reg) < 0x40000)
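With the extra parameter, callers that only need the return code keep
passing NULL (as in the driver_initiated_flr() hunks above), while callers
that want the final register value for diagnostics can capture it. A sketch
reusing the GU_CNTL/DRIVERFLR names from that hunk (the helper itself is
hypothetical):

static int wait_driverflr_clear(struct intel_uncore *uncore,
				unsigned int timeout_ms)
{
	u32 val;
	int ret;

	/* val receives the last value read, on success or timeout */
	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0,
					 timeout_ms, &val);
	if (ret)
		pr_err("DRIVERFLR stuck, GU_CNTL=0x%08x\n", val);

	return ret;
}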
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 51561b190b93..7fa194de5d35 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -114,7 +114,8 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
"wakeref.work", &key->work, 0);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
- ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+ if (!wf->debug.class)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref");
#endif
}
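Both ref_tracker hunks (intel_runtime_pm.c and here) guard the init behind a
.class check so an already-initialized directory is not re-initialized. For
context, the basic ref_tracker API shape; a general sketch, not taken from
this patch:

#include <linux/gfp.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir dir;

static void ref_tracker_example(void)
{
	struct ref_tracker *tracker;

	ref_tracker_dir_init(&dir, 16, "example_class"); /* 16 = dead-ref quarantine */
	ref_tracker_alloc(&dir, &tracker, GFP_KERNEL);	/* take a tracked reference */
	ref_tracker_free(&dir, &tracker);		/* release it */
	ref_tracker_dir_exit(&dir);			/* warns about leaked refs */
}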
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index f08f6674911e..7b856b5090f9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -413,15 +413,8 @@ static int igt_mock_splintered_region(void *arg)
close_objects(mem, &objects);
- /*
- * While we should be able allocate everything without any flag
- * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
- * actually limited to the largest power-of-two for the region size i.e
- * max_order, due to the inner workings of the buddy allocator. So make
- * sure that does indeed hold true.
- */
-
- obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, roundup_pow_of_two(size),
+ I915_BO_ALLOC_CONTIGUOUS);
if (!IS_ERR(obj)) {
pr_err("%s too large contiguous allocation was not rejected\n",
__func__);
@@ -429,8 +422,7 @@ static int igt_mock_splintered_region(void *arg)
goto out_close;
}
- obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
- I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj)) {
pr_err("%s largest possible contiguous allocation failed\n",
__func__);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a77e5b26542c..fb8751bd5df0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -28,6 +28,8 @@
#include <drm/drm_managed.h>
+#include "display/intel_display_device.h"
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
@@ -141,6 +143,7 @@ struct drm_i915_private *mock_gem_device(void)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
+ struct intel_display *display;
struct pci_dev *pdev;
int ret;
@@ -180,7 +183,11 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ goto err_device;
+
+ i915->display = display;
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
@@ -257,6 +264,7 @@ err_ttm:
intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
+err_device:
mock_destroy_device(i915);
return NULL;
@@ -266,6 +274,8 @@ void mock_destroy_device(struct drm_i915_private *i915)
{
struct device *dev = i915->drm.dev;
+ intel_display_device_remove(i915->display);
+
devres_release_group(dev, NULL);
put_device(dev);
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index eee5c4f45a43..deb159548a09 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -5,12 +5,17 @@
#include <linux/string_helpers.h>
+#include <drm/drm_managed.h>
+
+#include "../display/intel_display_core.h" /* FIXME */
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
-#include "vlv_sideband.h"
+#include "intel_uncore.h"
+#include "vlv_iosf_sb.h"
struct dram_dimm_info {
u16 size;
@@ -97,9 +102,9 @@ static unsigned int chv_mem_freq(struct drm_i915_private *i915)
{
u32 val;
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
- val = vlv_cck_read(i915, CCK_FUSE_REG);
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_CCK));
switch ((val >> 2) & 0x7) {
case 3:
@@ -113,9 +118,9 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
{
u32 val;
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
switch ((val >> 6) & 3) {
case 0:
@@ -381,9 +386,8 @@ intel_is_dram_symmetric(const struct dram_channel_info *ch0,
}
static int
-skl_dram_get_channels_info(struct drm_i915_private *i915)
+skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
struct dram_channel_info ch0 = {}, ch1 = {};
u32 val;
int ret;
@@ -444,14 +448,13 @@ skl_get_dram_type(struct drm_i915_private *i915)
}
static int
-skl_get_dram_info(struct drm_i915_private *i915)
+skl_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
int ret;
dram_info->type = skl_get_dram_type(i915);
- ret = skl_dram_get_channels_info(i915);
+ ret = skl_dram_get_channels_info(i915, dram_info);
if (ret)
return ret;
@@ -536,9 +539,8 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}
-static int bxt_get_dram_info(struct drm_i915_private *i915)
+static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- struct dram_info *dram_info = &i915->dram_info;
u32 val;
u8 valid_ranks = 0;
int i;
@@ -583,14 +585,14 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
return 0;
}
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+ struct dram_info *dram_info)
{
- struct dram_info *dram_info = &dev_priv->dram_info;
u32 val = 0;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ ret = intel_pcode_read(&dev_priv->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
@@ -645,27 +647,26 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
return 0;
}
-static int gen11_get_dram_info(struct drm_i915_private *i915)
+static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- int ret = skl_get_dram_info(i915);
+ int ret = skl_get_dram_info(i915, dram_info);
if (ret)
return ret;
- return icl_pcode_read_mem_global_info(i915);
+ return icl_pcode_read_mem_global_info(i915, dram_info);
}
-static int gen12_get_dram_info(struct drm_i915_private *i915)
+static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- i915->dram_info.wm_lv_0_adjust_needed = false;
+ dram_info->wm_lv_0_adjust_needed = false;
- return icl_pcode_read_mem_global_info(i915);
+ return icl_pcode_read_mem_global_info(i915, dram_info);
}
-static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
- struct dram_info *dram_info = &i915->dram_info;
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
case 0:
@@ -706,16 +707,22 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
return 0;
}
-void intel_dram_detect(struct drm_i915_private *i915)
+int intel_dram_detect(struct drm_i915_private *i915)
{
- struct dram_info *dram_info = &i915->dram_info;
+ struct dram_info *dram_info;
int ret;
detect_fsb_freq(i915);
detect_mem_freq(i915);
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
- return;
+ return 0;
+
+ dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
+ if (!dram_info)
+ return -ENOMEM;
+
+ i915->dram_info = dram_info;
/*
* Assume level 0 watermark latency adjustment is needed until proven
@@ -724,21 +731,22 @@ void intel_dram_detect(struct drm_i915_private *i915)
dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
if (DISPLAY_VER(i915) >= 14)
- ret = xelpdp_get_dram_info(i915);
+ ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
- ret = gen12_get_dram_info(i915);
+ ret = gen12_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 11)
- ret = gen11_get_dram_info(i915);
+ ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
- ret = bxt_get_dram_info(i915);
+ ret = bxt_get_dram_info(i915, dram_info);
else
- ret = skl_get_dram_info(i915);
+ ret = skl_get_dram_info(i915, dram_info);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
+ /* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
- return;
+ return 0;
drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
@@ -746,6 +754,20 @@ void intel_dram_detect(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
str_yes_no(dram_info->wm_lv_0_adjust_needed));
+
+ return 0;
+}
+
+/*
+ * Returns NULL for platforms that don't have dram info. Avoid overzealous NULL
+ * checks, and prefer not dereferencing on platforms that shouldn't look at dram
+ * info, to catch accidental and incorrect dram info checks.
+ */
+const struct dram_info *intel_dram_info(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return i915->dram_info;
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index a10136eda674..2a696e03aad4 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -6,10 +6,34 @@
#ifndef __INTEL_DRAM_H__
#define __INTEL_DRAM_H__
+#include <linux/types.h>
+
struct drm_i915_private;
+struct drm_device;
+
+struct dram_info {
+ bool wm_lv_0_adjust_needed;
+ u8 num_channels;
+ bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4,
+ INTEL_DRAM_DDR5,
+ INTEL_DRAM_LPDDR5,
+ INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
+ } type;
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+};
void intel_dram_edram_detect(struct drm_i915_private *i915);
-void intel_dram_detect(struct drm_i915_private *i915);
+int intel_dram_detect(struct drm_i915_private *i915);
unsigned int i9xx_fsb_freq(struct drm_i915_private *i915);
+const struct dram_info *intel_dram_info(struct drm_device *drm);
#endif /* __INTEL_DRAM_H__ */
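Display-side consumers are expected to reach dram info only through the
accessor; a minimal sketch of such a caller (the helper name is
hypothetical):

static bool i915_dram_is_lpddr(struct drm_device *drm)
{
	/* May be NULL on platforms without dram info; per the comment in
	 * intel_dram.c above, callers that can run on such platforms
	 * should not be dereferencing it in the first place.
	 */
	const struct dram_info *dram_info = intel_dram_info(drm);

	return dram_info->type == INTEL_DRAM_LPDDR4 ||
	       dram_info->type == INTEL_DRAM_LPDDR5;
}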
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 734e9f2801ea..5346b8dda79a 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -10,6 +10,8 @@
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
+#include "../display/intel_display_core.h" /* FIXME */
+
#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_iosf_sb.c
index 114ae8eb9cd5..f4b386933141 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.c
@@ -6,9 +6,7 @@
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
-#include "vlv_sideband.h"
-
-#include "display/intel_dpio_phy.h"
+#include "vlv_iosf_sb.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
@@ -57,19 +55,29 @@ static void __vlv_punit_put(struct drm_i915_private *i915)
iosf_mbi_punit_release();
}
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask)
{
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_get(i915);
mutex_lock(&i915->vlv_iosf_sb.lock);
+
+ i915->vlv_iosf_sb.locked_unit_mask |= unit_mask;
}
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ i915->vlv_iosf_sb.locked_unit_mask &= ~unit_mask;
+
+ drm_WARN_ON(drm, i915->vlv_iosf_sb.locked_unit_mask);
+
mutex_unlock(&i915->vlv_iosf_sb.lock);
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_put(i915);
}
@@ -123,131 +131,83 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
return err;
}
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
+static u32 unit_to_devfn(enum vlv_iosf_sb_unit unit)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
+ if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2 ||
+ unit == VLV_IOSF_SB_FLISDSI)
+ return DPIO_DEVFN;
+ else
+ return PCI_DEVFN(0, 0);
}
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
+static u32 unit_to_port(enum vlv_iosf_sb_unit unit)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRWRDA_NP, reg, &val);
+ switch (unit) {
+ case VLV_IOSF_SB_BUNIT:
+ return IOSF_PORT_BUNIT;
+ case VLV_IOSF_SB_CCK:
+ return IOSF_PORT_CCK;
+ case VLV_IOSF_SB_CCU:
+ return IOSF_PORT_CCU;
+ case VLV_IOSF_SB_DPIO:
+ return IOSF_PORT_DPIO;
+ case VLV_IOSF_SB_DPIO_2:
+ return IOSF_PORT_DPIO_2;
+ case VLV_IOSF_SB_FLISDSI:
+ return IOSF_PORT_FLISDSI;
+ case VLV_IOSF_SB_GPIO:
+ return 0; /* FIXME: unused */
+ case VLV_IOSF_SB_NC:
+ return IOSF_PORT_NC;
+ case VLV_IOSF_SB_PUNIT:
+ return IOSF_PORT_PUNIT;
+ default:
+ return 0;
+ }
}
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
+static u32 unit_to_opcode(enum vlv_iosf_sb_unit unit, bool write)
{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
+ if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2)
+ return write ? SB_MWR_NP : SB_MRD_NP;
+ else
+ return write ? SB_CRWRDA_NP : SB_CRRDDA_NP;
}
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
+u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRWRDA_NP, reg, &val);
-}
+ struct drm_i915_private *i915 = to_i915(drm);
+ u32 devfn, port, opcode, val = 0;
-static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
-{
- /*
- * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(i915))
- return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
- else
- return IOSF_PORT_DPIO;
-}
+ devfn = unit_to_devfn(unit);
+ port = unit_to_port(unit);
+ opcode = unit_to_opcode(unit, false);
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, phy);
- u32 val = 0;
+ if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
+ return 0;
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
+ drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));
- /*
- * FIXME: There might be some registers where all 1's is a valid value,
- * so ideally we should check the register offset instead...
- */
- drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO PHY%d read reg 0x%x == 0x%x\n",
- phy, reg, val);
+ vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
return val;
}
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum dpio_phy phy, int reg, u32 val)
+int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, phy);
+ struct drm_i915_private *i915 = to_i915(drm);
+ u32 devfn, port, opcode;
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
-}
+ devfn = unit_to_devfn(unit);
+ port = unit_to_port(unit);
+ opcode = unit_to_opcode(unit, true);
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
+ if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
+ return -EINVAL;
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
-}
+ drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
+ return vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
}
void vlv_iosf_sb_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/vlv_iosf_sb.h b/drivers/gpu/drm/i915/vlv_iosf_sb.h
new file mode 100644
index 000000000000..e2fea29a30ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_IOSF_SB_H_
+#define _VLV_IOSF_SB_H_
+
+#include <linux/types.h>
+
+#include "vlv_iosf_sb_reg.h"
+
+struct drm_device;
+struct drm_i915_private;
+
+enum vlv_iosf_sb_unit {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_DPIO_2,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+void vlv_iosf_sb_init(struct drm_i915_private *i915);
+void vlv_iosf_sb_fini(struct drm_i915_private *i915);
+
+void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask);
+void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask);
+
+u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr);
+int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val);
+
+#endif /* _VLV_IOSF_SB_H_ */
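For reference, the call sequence the unified API expects, mirroring the
chv_mem_freq() conversion in intel_dram.c above (CCK_FUSE_REG comes from
vlv_iosf_sb_reg.h; the helper itself is illustrative):

static u32 read_cck_fuse(struct drm_device *drm)
{
	u32 val;

	/* Takes the sideband mutex, plus a punit power reference when
	 * BIT(VLV_IOSF_SB_PUNIT) is in the mask.
	 */
	vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCK));
	val = vlv_iosf_sb_read(drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
	vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCK));

	return val;
}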
diff --git a/drivers/gpu/drm/i915/vlv_sideband_reg.h b/drivers/gpu/drm/i915/vlv_iosf_sb_reg.h
index b7fbff3d0409..f977fb3b6e17 100644
--- a/drivers/gpu/drm/i915/vlv_sideband_reg.h
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb_reg.h
@@ -3,8 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#ifndef _VLV_SIDEBAND_REG_H_
-#define _VLV_SIDEBAND_REG_H_
+#ifndef _VLV_IOSF_SB_REG_H_
+#define _VLV_IOSF_SB_REG_H_
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
@@ -177,4 +177,4 @@
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-#endif /* _VLV_SIDEBAND_REG_H_ */
+#endif /* _VLV_IOSF_SB_REG_H_ */
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
deleted file mode 100644
index 31813e07c56f..000000000000
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _VLV_SIDEBAND_H_
-#define _VLV_SIDEBAND_H_
-
-#include <linux/bitops.h>
-#include <linux/types.h>
-
-#include "vlv_sideband_reg.h"
-
-enum dpio_phy;
-struct drm_i915_private;
-
-enum {
- VLV_IOSF_SB_BUNIT,
- VLV_IOSF_SB_CCK,
- VLV_IOSF_SB_CCU,
- VLV_IOSF_SB_DPIO,
- VLV_IOSF_SB_FLISDSI,
- VLV_IOSF_SB_GPIO,
- VLV_IOSF_SB_NC,
- VLV_IOSF_SB_PUNIT,
-};
-
-void vlv_iosf_sb_init(struct drm_i915_private *i915);
-void vlv_iosf_sb_fini(struct drm_i915_private *i915);
-
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
-
-static inline void vlv_bunit_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_BUNIT));
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg);
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_bunit_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_BUNIT));
-}
-
-static inline void vlv_cck_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg);
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_cck_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
-}
-
-static inline void vlv_ccu_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCU));
-}
-
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg);
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_ccu_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCU));
-}
-
-static inline void vlv_dpio_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
-}
-
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg);
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum dpio_phy phy, int reg, u32 val);
-
-static inline void vlv_dpio_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_DPIO));
-}
-
-static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_FLISDSI));
-}
-
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg);
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val);
-
-static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_FLISDSI));
-}
-
-static inline void vlv_nc_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_NC));
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr);
-
-static inline void vlv_nc_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_NC));
-}
-
-static inline void vlv_punit_get(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
-}
-
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr);
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val);
-
-static inline void vlv_punit_put(struct drm_i915_private *i915)
-{
- vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
-}
-
-#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 59b334d094fa..7564b0f21b42 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -446,7 +446,7 @@ create_job(struct pvr_device *pvr_dev,
if (err)
goto err_put_job;
- err = pvr_queue_job_init(job);
+ err = pvr_queue_job_init(job, pvr_file->file->client_id);
if (err)
goto err_put_job;
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index 3e349d039fc0..187a07e0bd9a 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -340,6 +340,63 @@ pvr_power_device_idle(struct device *dev)
return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
}
+static int
+pvr_power_clear_error(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+ int err;
+
+ /* Ensure the device state is known and nothing is happening past this point */
+ pm_runtime_disable(dev);
+
+ /* Attempt to clear the runtime PM error by setting the current state again */
+ if (pm_runtime_status_suspended(dev))
+ err = pm_runtime_set_suspended(dev);
+ else
+ err = pm_runtime_set_active(dev);
+
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev),
+ "%s: Failed to clear runtime PM error (new error %d)\n",
+ __func__, err);
+ }
+
+ pm_runtime_enable(dev);
+
+ return err;
+}
+
+/**
+ * pvr_power_get_clear() - Acquire a power reference, correcting any errors
+ * @pvr_dev: Device pointer
+ *
+ * Attempt to acquire a power reference on the device. If runtime PM is in
+ * an error state, attempt to clear the error and retry.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_power_get() or the runtime PM API.
+ */
+static int
+pvr_power_get_clear(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_power_get(pvr_dev);
+ if (err == 0)
+ return err;
+
+ drm_warn(from_pvr_device(pvr_dev),
+ "%s: pvr_power_get returned error %d, attempting recovery\n",
+ __func__, err);
+
+ err = pvr_power_clear_error(pvr_dev);
+ if (err)
+ return err;
+
+ return pvr_power_get(pvr_dev);
+}
+
/**
* pvr_power_reset() - Reset the GPU
* @pvr_dev: Device pointer
@@ -364,7 +421,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
* Take a power reference during the reset. This should prevent any interference with the
* power state during reset.
*/
- WARN_ON(pvr_power_get(pvr_dev));
+ WARN_ON(pvr_power_get_clear(pvr_dev));
down_write(&pvr_dev->reset_sem);
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5e9bc0992824..fc415dd0d7a7 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -803,7 +803,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
* the scheduler, and re-assign parent fences in the middle.
*
* Return:
- * * DRM_GPU_SCHED_STAT_NOMINAL.
+ * * DRM_GPU_SCHED_STAT_RESET.
*/
static enum drm_gpu_sched_stat
pvr_queue_timedout_job(struct drm_sched_job *s_job)
@@ -854,7 +854,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
drm_sched_start(sched, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/**
@@ -1073,6 +1073,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
/**
* pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
* @job: The job to initialize.
+ * @drm_client_id: drm_file.client_id submitting the job
*
* Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
* and pvr_queue_job_push() can't fail. We also make sure the context type is
@@ -1082,7 +1083,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
* * 0 on success, or
* * An error code if something failed.
*/
-int pvr_queue_job_init(struct pvr_job *job)
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id)
{
/* Fragment jobs need at least one native fence wait on the geometry job fence. */
u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
@@ -1099,7 +1100,7 @@ int pvr_queue_job_init(struct pvr_job *job)
if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
return -E2BIG;
- err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
+ err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE, drm_client_id);
if (err)
return err;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index 93fe9ac9f58c..fc1986d73fc8 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -143,7 +143,7 @@ struct pvr_queue {
bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);
-int pvr_queue_job_init(struct pvr_job *job);
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id);
void pvr_queue_job_cleanup(struct pvr_job *job);
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 03535a15dd8f..3e8c6edbc17c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/gpu/drm/imx/dc/Kconfig"
source "drivers/gpu/drm/imx/dcss/Kconfig"
source "drivers/gpu/drm/imx/ipuv3/Kconfig"
source "drivers/gpu/drm/imx/lcdc/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 86f38e7c7422..c7b317640d71 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_IMX8_DC) += dc/
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
obj-$(CONFIG_DRM_IMX) += ipuv3/
obj-$(CONFIG_DRM_IMX_LCDC) += lcdc/
diff --git a/drivers/gpu/drm/imx/dc/Kconfig b/drivers/gpu/drm/imx/dc/Kconfig
new file mode 100644
index 000000000000..415993207f2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_IMX8_DC
+ tristate "Freescale i.MX8 Display Controller Graphics"
+ depends on DRM && COMMON_CLK && OF && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select GENERIC_IRQ_CHIP
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Enable Freescale i.MX8 Display Controller (DC) graphics support.
diff --git a/drivers/gpu/drm/imx/dc/Makefile b/drivers/gpu/drm/imx/dc/Makefile
new file mode 100644
index 000000000000..b9d33c074984
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imx8-dc-drm-objs := dc-cf.o dc-crtc.o dc-de.o dc-drv.o dc-ed.o dc-fg.o dc-fl.o \
+ dc-fu.o dc-fw.o dc-ic.o dc-kms.o dc-lb.o dc-pe.o \
+ dc-plane.o dc-tc.o
+
+obj-$(CONFIG_DRM_IMX8_DC) += imx8-dc-drm.o
diff --git a/drivers/gpu/drm/imx/dc/dc-cf.c b/drivers/gpu/drm/imx/dc/dc-cf.c
new file mode 100644
index 000000000000..2f077161e912
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-cf.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define STATICCONTROL 0x8
+
+#define FRAMEDIMENSIONS 0xc
+#define HEIGHT(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define WIDTH(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define CONSTANTCOLOR 0x10
+#define BLUE(x) FIELD_PREP(GENMASK(15, 8), (x))
+
+static const struct dc_subdev_info dc_cf_info[] = {
+ { .reg_start = 0x56180960, .id = 0, },
+ { .reg_start = 0x561809e0, .id = 1, },
+ { .reg_start = 0x561809a0, .id = 4, },
+ { .reg_start = 0x56180a20, .id = 5, },
+};
+
+static const struct regmap_range dc_cf_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, CONSTANTCOLOR),
+};
+
+static const struct regmap_access_table dc_cf_regmap_access_table = {
+ .yes_ranges = dc_cf_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_cf_regmap_ranges),
+};
+
+static const struct regmap_config dc_cf_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_cf_regmap_access_table,
+ .rd_table = &dc_cf_regmap_access_table,
+ .max_register = CONSTANTCOLOR,
+};
+
+static inline void dc_cf_enable_shden(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, STATICCONTROL, SHDEN);
+}
+
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf)
+{
+ return cf->link;
+}
+
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w,
+ unsigned int h)
+{
+ regmap_write(cf->reg_cfg, FRAMEDIMENSIONS, WIDTH(w) | HEIGHT(h));
+}
+
+void dc_cf_constantcolor_black(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, 0);
+}
+
+void dc_cf_constantcolor_blue(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, BLUE(0xff));
+}
+
+void dc_cf_init(struct dc_cf *cf)
+{
+ dc_cf_enable_shden(cf);
+}
+
+static int dc_cf_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_cf *cf;
+ int id;
+
+ cf = devm_kzalloc(dev, sizeof(*cf), GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ cf->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_cf_cfg_regmap_config);
+ if (IS_ERR(cf->reg_cfg))
+ return PTR_ERR(cf->reg_cfg);
+
+ id = dc_subdev_get_id(dc_cf_info, ARRAY_SIZE(dc_cf_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
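+	/* instances 0/1 feed the content streams; 4/5 feed the safety streams */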
+ switch (id) {
+ case 0:
+ cf->link = LINK_ID_CONSTFRAME0;
+ dc_drm->cf_cont[0] = cf;
+ break;
+ case 1:
+ cf->link = LINK_ID_CONSTFRAME1;
+ dc_drm->cf_cont[1] = cf;
+ break;
+ case 4:
+ cf->link = LINK_ID_CONSTFRAME4;
+ dc_drm->cf_safe[0] = cf;
+ break;
+ case 5:
+ cf->link = LINK_ID_CONSTFRAME5;
+ dc_drm->cf_safe[1] = cf;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_cf_ops = {
+ .bind = dc_cf_bind,
+};
+
+static int dc_cf_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_cf_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_cf_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_cf_ops);
+}
+
+static const struct of_device_id dc_cf_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-constframe" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_cf_dt_ids);
+
+struct platform_driver dc_cf_driver = {
+ .probe = dc_cf_probe,
+ .remove = dc_cf_remove,
+ .driver = {
+ .name = "imx8-dc-constframe",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_cf_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-crtc.c b/drivers/gpu/drm/imx/dc/dc-crtc.c
new file mode 100644
index 000000000000..31d3a982deaf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-crtc.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+#define dc_crtc_dbg(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_dbg_kms(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define dc_crtc_err(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_err(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
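+/*
+ * The helper macros below expect local variables named "dc_crtc" and
+ * "crtc" in the calling scope; the "c" argument names a completion
+ * member of struct dc_crtc.
+ */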
+#define DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(c) \
+do { \
+ unsigned long ret; \
+ ret = wait_for_completion_timeout(&dc_crtc->c, HZ); \
+ if (ret == 0) \
+ dc_crtc_err(crtc, "%s: wait for " #c " timeout\n", \
+ __func__); \
+} while (0)
+
+#define DC_CRTC_CHECK_FRAMEGEN_FIFO(fg) \
+do { \
+ struct dc_fg *_fg = (fg); \
+ if (dc_fg_secondary_requests_to_read_empty_fifo(_fg)) { \
+ dc_fg_secondary_clear_channel_status(_fg); \
+ dc_crtc_err(crtc, "%s: FrameGen FIFO empty\n", \
+ __func__); \
+ } \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(fg) \
+do { \
+ if (dc_fg_wait_for_secondary_syncup(fg)) \
+ dc_crtc_err(crtc, \
+ "%s: FrameGen secondary channel isn't syncup\n",\
+ __func__); \
+} while (0)
+
+static inline struct dc_crtc *to_dc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct dc_crtc, base);
+}
+
+static u32 dc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ return dc_fg_get_frame_index(dc_crtc->fg);
+}
+
+static int dc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ enable_irq(dc_crtc->irq_dec_framecomplete);
+
+ return 0;
+}
+
+static void dc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ /* nosync due to atomic context */
+ disable_irq_nosync(dc_crtc->irq_dec_framecomplete);
+}
+
+static const struct drm_crtc_funcs dc_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .get_vblank_counter = dc_crtc_get_vblank_counter,
+ .enable_vblank = dc_crtc_enable_vblank,
+ .disable_vblank = dc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static void dc_crtc_queue_state_event(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc_state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ WARN_ON(dc_crtc->event);
+ dc_crtc->event = crtc_state->event;
+ crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static inline enum drm_mode_status
+dc_crtc_check_clock(struct dc_crtc *dc_crtc, int clk_khz)
+{
+ return dc_fg_check_clock(dc_crtc->fg, clk_khz);
+}
+
+static enum drm_mode_status
+dc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, mode->clock);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->crtc_clock > DC_FRAMEGEN_MAX_CLOCK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int
+dc_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, adj->clock);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+dc_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ int idx, ret;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->active)
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ return;
+
+	/* request pixel engine power-on when the CRTC becomes active */
+ ret = pm_runtime_resume_and_get(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to get DC pixel engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+}
+
+static void
+dc_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ (!old_crtc_state->active && !new_crtc_state->active))
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ /* flush plane update out to display */
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum dc_link_id cf_link;
+ int idx, ret;
+
+ dc_crtc_dbg(crtc, "mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(adj));
+
+ drm_crtc_vblank_on(crtc);
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ /* request display engine power-on when CRTC is enabled */
+ ret = pm_runtime_resume_and_get(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to get DC display engine RPM: %d\n",
+ ret);
+
+ enable_irq(dc_crtc->irq_dec_shdload);
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+ enable_irq(dc_crtc->irq_ed_safe_shdload);
+
+ dc_fg_cfg_videomode(dc_crtc->fg, adj);
+
+ dc_cf_framedimensions(dc_crtc->cf_cont,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+ dc_cf_framedimensions(dc_crtc->cf_safe,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+
+ /* constframe in safety stream shows blue frame */
+ dc_cf_constantcolor_blue(dc_crtc->cf_safe);
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_safe);
+ dc_ed_pec_src_sel(dc_crtc->ed_safe, cf_link);
+
+ /* show CRTC background if no plane is enabled */
+ if (new_crtc_state->plane_mask == 0) {
+ /* constframe in content stream shows black frame */
+ dc_cf_constantcolor_black(dc_crtc->cf_cont);
+
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_cont);
+ dc_ed_pec_src_sel(dc_crtc->ed_cont, cf_link);
+ }
+
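+	/*
+	 * Start the pixel clock, latch the new configuration through the
+	 * sync triggers and a shadow-load token, then enable the FrameGen.
+	 */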
+ dc_fg_enable_clock(dc_crtc->fg);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_safe);
+ dc_fg_shdtokgen(dc_crtc->fg);
+ dc_fg_enable(dc_crtc->fg);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_safe_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_safe_shdload);
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+ disable_irq(dc_crtc->irq_dec_shdload);
+
+ DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(dc_crtc->fg);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx, ret;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_dec_seqcomplete);
+ dc_fg_disable(dc_crtc->fg);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_seqcomplete_done);
+ disable_irq(dc_crtc->irq_dec_seqcomplete);
+
+ dc_fg_disable_clock(dc_crtc->fg);
+
+	/* request pixel engine power-off as the planes are off too */
+	ret = pm_runtime_put(dc_drm->pe->dev);
+	if (ret < 0)
+ dc_crtc_err(crtc, "failed to put DC pixel engine RPM: %d\n",
+ ret);
+
+ /* request display engine power-off when CRTC is disabled */
+ ret = pm_runtime_put(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to put DC display engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+
+out:
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (new_crtc_state->event && !new_crtc_state->active) {
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool dc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int vdisplay = mode->crtc_vdisplay;
+ int vtotal = mode->crtc_vtotal;
+ bool reliable;
+ int line;
+ int idx;
+
+ if (stime)
+ *stime = ktime_get();
+
+ if (!drm_dev_enter(crtc->dev, &idx)) {
+ reliable = false;
+ *vpos = 0;
+ *hpos = 0;
+ goto out;
+ }
+
+	/* line index starts at 0 for the first active output line */
+ line = dc_fg_get_line_index(dc_crtc->fg);
+
+ if (line < vdisplay)
+ /* active scanout area - positive */
+ *vpos = line + 1;
+ else
+ /* inside vblank - negative */
+ *vpos = line - (vtotal - 1);
+
+ *hpos = 0;
+
+ reliable = true;
+
+ drm_dev_exit(idx);
+out:
+ if (etime)
+ *etime = ktime_get();
+
+ return reliable;
+}
+
+static const struct drm_crtc_helper_funcs dc_helper_funcs = {
+ .mode_valid = dc_crtc_mode_valid,
+ .atomic_check = dc_crtc_atomic_check,
+ .atomic_begin = dc_crtc_atomic_begin,
+ .atomic_flush = dc_crtc_atomic_flush,
+ .atomic_enable = dc_crtc_atomic_enable,
+ .atomic_disable = dc_crtc_atomic_disable,
+ .get_scanout_position = dc_crtc_get_scanout_position,
+};
+
+static irqreturn_t dc_crtc_irq_handler_dec_framecomplete(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (dc_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, dc_crtc->event);
+ dc_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_dec_seqcomplete_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_seqcomplete_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dc_crtc_irq_handler_dec_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_cont_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_cont_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_safe_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_safe_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dc_crtc_request_irqs(struct drm_device *drm, struct dc_crtc *dc_crtc)
+{
+ struct {
+ struct device *dev;
+ unsigned int irq;
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ } irqs[DC_CRTC_IRQS] = {
+ {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_framecomplete,
+ dc_crtc_irq_handler_dec_framecomplete,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_seqcomplete,
+ dc_crtc_irq_handler_dec_seqcomplete_done,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_shdload,
+ dc_crtc_irq_handler_dec_shdload_done,
+ }, {
+ dc_crtc->ed_cont->dev,
+ dc_crtc->irq_ed_cont_shdload,
+ dc_crtc_irq_handler_ed_cont_shdload_done,
+ }, {
+ dc_crtc->ed_safe->dev,
+ dc_crtc->irq_ed_safe_shdload,
+ dc_crtc_irq_handler_ed_safe_shdload_done,
+ },
+ };
+ int i, ret;
+
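+	/*
+	 * All IRQs are requested with IRQF_NO_AUTOEN, so they stay masked
+	 * until the enable/disable/flush paths enable them on demand.
+	 */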
+ for (i = 0; i < DC_CRTC_IRQS; i++) {
+ struct dc_crtc_irq *irq = &dc_crtc->irqs[i];
+
+ ret = devm_request_irq(irqs[i].dev, irqs[i].irq,
+ irqs[i].irq_handler, IRQF_NO_AUTOEN,
+ dev_name(irqs[i].dev), dc_crtc);
+ if (ret) {
+ dev_err(irqs[i].dev, "failed to request irq(%u): %d\n",
+ irqs[i].irq, ret);
+ return ret;
+ }
+
+ irq->dc_crtc = dc_crtc;
+ irq->irq = irqs[i].irq;
+ }
+
+ return 0;
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct dc_de *de = dc_drm->de[crtc_index];
+ struct dc_pe *pe = dc_drm->pe;
+ struct dc_plane *dc_primary;
+ int ret;
+
+ dc_crtc->de = de;
+
+ init_completion(&dc_crtc->dec_seqcomplete_done);
+ init_completion(&dc_crtc->dec_shdload_done);
+ init_completion(&dc_crtc->ed_cont_shdload_done);
+ init_completion(&dc_crtc->ed_safe_shdload_done);
+
+ dc_crtc->cf_cont = pe->cf_cont[crtc_index];
+ dc_crtc->cf_safe = pe->cf_safe[crtc_index];
+ dc_crtc->ed_cont = pe->ed_cont[crtc_index];
+ dc_crtc->ed_safe = pe->ed_safe[crtc_index];
+ dc_crtc->fg = de->fg;
+
+ dc_crtc->irq_dec_framecomplete = de->irq_framecomplete;
+ dc_crtc->irq_dec_seqcomplete = de->irq_seqcomplete;
+ dc_crtc->irq_dec_shdload = de->irq_shdload;
+ dc_crtc->irq_ed_safe_shdload = dc_crtc->ed_safe->irq_shdload;
+ dc_crtc->irq_ed_cont_shdload = dc_crtc->ed_cont->irq_shdload;
+
+ dc_primary = &dc_drm->dc_primary[crtc_index];
+ ret = dc_plane_init(dc_drm, dc_primary);
+ if (ret) {
+ dev_err(de->dev, "failed to initialize primary plane: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&dc_crtc->base, &dc_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &dc_crtc->base, &dc_primary->base,
+ NULL, &dc_crtc_funcs, NULL);
+ if (ret)
+ dev_err(de->dev, "failed to add CRTC: %d\n", ret);
+
+ return ret;
+}
+
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+
+ return dc_crtc_request_irqs(drm, dc_crtc);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-de.c b/drivers/gpu/drm/imx/dc/dc-de.c
new file mode 100644
index 000000000000..5a3125596fdf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define POLARITYCTRL 0xc
+#define POLEN_HIGH BIT(2)
+
+static const struct dc_subdev_info dc_de_info[] = {
+ { .reg_start = 0x5618b400, .id = 0, },
+ { .reg_start = 0x5618b420, .id = 1, },
+};
+
+static const struct regmap_range dc_de_regmap_ranges[] = {
+ regmap_reg_range(POLARITYCTRL, POLARITYCTRL),
+};
+
+static const struct regmap_access_table dc_de_regmap_access_table = {
+ .yes_ranges = dc_de_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_de_regmap_ranges),
+};
+
+static const struct regmap_config dc_de_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_de_regmap_access_table,
+ .rd_table = &dc_de_regmap_access_table,
+ .max_register = POLARITYCTRL,
+};
+
+static inline void dc_dec_init(struct dc_de *de)
+{
+	regmap_write_bits(de->reg_top, POLARITYCTRL, POLEN_HIGH, POLEN_HIGH);
+}
+
+static int dc_de_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_top;
+ void __iomem *base_top;
+ struct dc_de *de;
+ int ret, id;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ base_top = devm_platform_get_and_ioremap_resource(pdev, 0, &res_top);
+ if (IS_ERR(base_top))
+ return PTR_ERR(base_top);
+
+ de->reg_top = devm_regmap_init_mmio(dev, base_top,
+ &dc_de_top_regmap_config);
+ if (IS_ERR(de->reg_top))
+ return PTR_ERR(de->reg_top);
+
+ de->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (de->irq_shdload < 0)
+ return de->irq_shdload;
+
+ de->irq_framecomplete = platform_get_irq_byname(pdev, "framecomplete");
+ if (de->irq_framecomplete < 0)
+ return de->irq_framecomplete;
+
+ de->irq_seqcomplete = platform_get_irq_byname(pdev, "seqcomplete");
+ if (de->irq_seqcomplete < 0)
+ return de->irq_seqcomplete;
+
+ de->dev = dev;
+
+ dev_set_drvdata(dev, de);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ id = dc_subdev_get_id(dc_de_info, ARRAY_SIZE(dc_de_info), res_top);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ dc_drm->de[id] = de;
+
+ return 0;
+}
+
+/*
+ * It's possible to get the child device pointers from the child component
+ * bind callbacks, but that would rely on the component helper binding the
+ * display engine component first. To avoid that ordering dependency, fetch
+ * the pointers from dc_drm here, after all components are bound.
+ */
+void dc_de_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_de *de;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ de = dc_drm->de[i];
+ de->fg = dc_drm->fg[i];
+ de->tc = dc_drm->tc[i];
+ }
+}
+
+static const struct component_ops dc_de_ops = {
+ .bind = dc_de_bind,
+};
+
+static int dc_de_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_de_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_de_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_de_ops);
+}
+
+static int dc_de_runtime_resume(struct device *dev)
+{
+ struct dc_de *de = dev_get_drvdata(dev);
+
+ dc_dec_init(de);
+ dc_fg_init(de->fg);
+ dc_tc_init(de->tc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_de_pm_ops = {
+ RUNTIME_PM_OPS(NULL, dc_de_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_de_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-display-engine" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_de_dt_ids);
+
+struct platform_driver dc_de_driver = {
+ .probe = dc_de_probe,
+ .remove = dc_de_remove,
+ .driver = {
+ .name = "imx8-dc-display-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_de_dt_ids,
+		.pm = pm_ptr(&dc_de_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-de.h b/drivers/gpu/drm/imx/dc/dc-de.h
new file mode 100644
index 000000000000..211f3fcc1a9a
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DISPLAY_ENGINE_H__
+#define __DC_DISPLAY_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <drm/drm_modes.h>
+
+#define DC_DISPLAYS 2
+
+#define DC_FRAMEGEN_MAX_FRAME_INDEX 0x3ffff
+#define DC_FRAMEGEN_MAX_CLOCK_KHZ 300000
+
+struct dc_fg {
+ struct device *dev;
+ struct regmap *reg;
+ struct clk *clk_disp;
+};
+
+struct dc_tc {
+ struct device *dev;
+ struct regmap *reg;
+};
+
+struct dc_de {
+ struct device *dev;
+ struct regmap *reg_top;
+ struct dc_fg *fg;
+ struct dc_tc *tc;
+ int irq_shdload;
+ int irq_framecomplete;
+ int irq_seqcomplete;
+};
+
+/* Frame Generator Unit */
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m);
+void dc_fg_enable(struct dc_fg *fg);
+void dc_fg_disable(struct dc_fg *fg);
+void dc_fg_shdtokgen(struct dc_fg *fg);
+u32 dc_fg_get_frame_index(struct dc_fg *fg);
+u32 dc_fg_get_line_index(struct dc_fg *fg);
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg);
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg);
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg);
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg);
+void dc_fg_enable_clock(struct dc_fg *fg);
+void dc_fg_disable_clock(struct dc_fg *fg);
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz);
+void dc_fg_init(struct dc_fg *fg);
+
+/* Timing Controller Unit */
+void dc_tc_init(struct dc_tc *tc);
+
+#endif /* __DC_DISPLAY_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.c b/drivers/gpu/drm/imx/dc/dc-drv.c
new file mode 100644
index 000000000000..04f021d2d6cf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+struct dc_priv {
+ struct drm_device *drm;
+ struct clk *clk_cfg;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(dc_drm_driver_fops);
+
+static struct drm_driver dc_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .fops = &dc_drm_driver_fops,
+ .name = "imx8-dc",
+ .desc = "i.MX8 DC DRM graphics",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static void
+dc_add_components(struct device *dev, struct component_match **matchptr)
+{
+ struct device_node *child, *grandchild;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* The interrupt controller is not a component. */
+ if (of_device_is_compatible(child, "fsl,imx8qxp-dc-intc"))
+ continue;
+
+ drm_of_component_match_add(dev, matchptr, component_compare_of,
+ child);
+
+ for_each_available_child_of_node(child, grandchild)
+ drm_of_component_match_add(dev, matchptr,
+ component_compare_of,
+ grandchild);
+ }
+}
+
+static int dc_drm_component_bind_all(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret;
+
+ ret = component_bind_all(drm->dev, dc_drm);
+ if (ret)
+ return ret;
+
+ dc_de_post_bind(dc_drm);
+ dc_pe_post_bind(dc_drm);
+
+ return 0;
+}
+
+static void dc_drm_component_unbind_all(void *ptr)
+{
+ struct dc_drm_device *dc_drm = ptr;
+ struct drm_device *drm = &dc_drm->base;
+
+ component_unbind_all(drm->dev, dc_drm);
+}
+
+static int dc_drm_bind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm;
+ struct drm_device *drm;
+ int ret;
+
+ dc_drm = devm_drm_dev_alloc(dev, &dc_drm_driver, struct dc_drm_device,
+ base);
+ if (IS_ERR(dc_drm))
+ return PTR_ERR(dc_drm);
+
+ drm = &dc_drm->base;
+
+ ret = dc_drm_component_bind_all(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, dc_drm_component_unbind_all,
+ dc_drm);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ dev_err(dev, "failed to register drm device: %d\n", ret);
+ goto err;
+ }
+
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_XRGB8888);
+
+ priv->drm = drm;
+
+ return 0;
+
+err:
+ dc_kms_uninit(dc_drm);
+
+ return ret;
+}
+
+static void dc_drm_unbind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(priv->drm);
+ struct drm_device *drm = &dc_drm->base;
+
+ priv->drm = NULL;
+ drm_dev_unplug(drm);
+ dc_kms_uninit(dc_drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct component_master_ops dc_drm_ops = {
+ .bind = dc_drm_bind,
+ .unbind = dc_drm_unbind,
+};
+
+static int dc_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct dc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk_cfg = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk_cfg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cfg),
+ "failed to get cfg clock\n");
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return ret;
+
+ dc_add_components(&pdev->dev, &match);
+
+ ret = component_master_add_with_match(&pdev->dev, &dc_drm_ops, match);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component master\n");
+
+ return 0;
+}
+
+static void dc_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &dc_drm_ops);
+}
+
+static int dc_runtime_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_cfg);
+
+ return 0;
+}
+
+static int dc_runtime_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_cfg);
+ if (ret)
+ dev_err(dev, "failed to enable cfg clock: %d\n", ret);
+
+ return ret;
+}
+
+static int dc_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int dc_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(priv->drm);
+}
+
+static void dc_shutdown(struct platform_device *pdev)
+{
+ struct dc_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
+static const struct dev_pm_ops dc_pm_ops = {
+ RUNTIME_PM_OPS(dc_runtime_suspend, dc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dc_suspend, dc_resume)
+};
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_driver = {
+ .probe = dc_probe,
+ .remove = dc_remove,
+ .shutdown = dc_shutdown,
+ .driver = {
+ .name = "imx8-dc",
+ .of_match_table = dc_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pm_ops),
+ },
+};
+
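+/* All subunit drivers plus the master driver, registered as a group. */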
+static struct platform_driver * const dc_drivers[] = {
+ &dc_cf_driver,
+ &dc_de_driver,
+ &dc_ed_driver,
+ &dc_fg_driver,
+ &dc_fl_driver,
+ &dc_fw_driver,
+ &dc_ic_driver,
+ &dc_lb_driver,
+ &dc_pe_driver,
+ &dc_tc_driver,
+ &dc_driver,
+};
+
+static int __init dc_drm_init(void)
+{
+ return platform_register_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+static void __exit dc_drm_exit(void)
+{
+ platform_unregister_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+module_init(dc_drm_init);
+module_exit(dc_drm_exit);
+
+MODULE_DESCRIPTION("i.MX8 Display Controller DRM Driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.h b/drivers/gpu/drm/imx/dc/dc-drv.h
new file mode 100644
index 000000000000..eb61b8c76269
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DRV_H__
+#define __DC_DRV_H__
+
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+
+#include "dc-de.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+/**
+ * struct dc_drm_device - DC specific drm_device
+ */
+struct dc_drm_device {
+ /** @base: base drm_device structure */
+ struct drm_device base;
+ /** @dc_crtc: DC specific CRTC list */
+ struct dc_crtc dc_crtc[DC_DISPLAYS];
+ /** @dc_primary: DC specific primary plane list */
+ struct dc_plane dc_primary[DC_DISPLAYS];
+ /** @encoder: encoder list */
+ struct drm_encoder encoder[DC_DISPLAYS];
+	/** @cf_safe: constframe list (safety stream) */
+	struct dc_cf *cf_safe[DC_DISPLAYS];
+	/** @cf_cont: constframe list (content stream) */
+	struct dc_cf *cf_cont[DC_DISPLAYS];
+	/** @de: display engine list */
+	struct dc_de *de[DC_DISPLAYS];
+	/** @ed_safe: extdst list (safety stream) */
+	struct dc_ed *ed_safe[DC_DISPLAYS];
+	/** @ed_cont: extdst list (content stream) */
+	struct dc_ed *ed_cont[DC_DISPLAYS];
+	/** @fg: framegen list */
+	struct dc_fg *fg[DC_DISPLAYS];
+	/** @fu_disp: fetchunit list (used by display engine) */
+	struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ /** @lb: layerblend list */
+ struct dc_lb *lb[DC_LB_CNT];
+ /** @pe: pixel engine */
+ struct dc_pe *pe;
+ /** @tc: tcon list */
+ struct dc_tc *tc[DC_DISPLAYS];
+};
+
+struct dc_subdev_info {
+ resource_size_t reg_start;
+ int id;
+};
+
+static inline struct dc_drm_device *to_dc_drm_device(struct drm_device *drm)
+{
+ return container_of(drm, struct dc_drm_device, base);
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index);
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index);
+
+int dc_kms_init(struct dc_drm_device *dc_drm);
+void dc_kms_uninit(struct dc_drm_device *dc_drm);
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane);
+
+extern struct platform_driver dc_cf_driver;
+extern struct platform_driver dc_de_driver;
+extern struct platform_driver dc_ed_driver;
+extern struct platform_driver dc_fg_driver;
+extern struct platform_driver dc_fl_driver;
+extern struct platform_driver dc_fw_driver;
+extern struct platform_driver dc_ic_driver;
+extern struct platform_driver dc_lb_driver;
+extern struct platform_driver dc_pe_driver;
+extern struct platform_driver dc_tc_driver;
+
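+/* Map a subunit's MMIO base address to its fixed instance id. */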
+static inline int dc_subdev_get_id(const struct dc_subdev_info *info,
+ int info_cnt, struct resource *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < info_cnt; i++)
+ if (info[i].reg_start == res->start)
+ return info[i].id;
+
+ return -EINVAL;
+}
+
+void dc_de_post_bind(struct dc_drm_device *dc_drm);
+void dc_pe_post_bind(struct dc_drm_device *dc_drm);
+
+#endif /* __DC_DRV_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
new file mode 100644
index 000000000000..86ecc22d0a55
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_STATIC 0x8
+#define POWERDOWN BIT(4)
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define DIV_MASK GENMASK(23, 16)
+#define DIV(x) FIELD_PREP(DIV_MASK, (x))
+#define DIV_RESET 0x80
+
+#define PIXENGCFG_DYNAMIC 0xc
+
+#define PIXENGCFG_TRIGGER 0x14
+#define SYNC_TRIGGER BIT(0)
+
+#define STATICCONTROL 0x8
+#define KICK_MODE BIT(8)
+#define EXTERNAL BIT(8)
+#define PERFCOUNTMODE BIT(12)
+
+#define CONTROL 0xc
+#define GAMMAAPPLYENABLE BIT(0)
+
+static const struct dc_subdev_info dc_ed_info[] = {
+ { .reg_start = 0x56180980, .id = 0, },
+ { .reg_start = 0x56180a00, .id = 1, },
+ { .reg_start = 0x561809c0, .id = 4, },
+ { .reg_start = 0x56180a40, .id = 5, },
+};
+
+static const struct regmap_range dc_ed_pec_regmap_write_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_write_table = {
+ .yes_ranges = dc_ed_pec_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_read_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_read_table = {
+ .yes_ranges = dc_ed_pec_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_volatile_ranges[] = {
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_volatile_table = {
+ .yes_ranges = dc_ed_pec_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ed_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_pec_regmap_write_table,
+ .rd_table = &dc_ed_pec_regmap_read_table,
+ .volatile_table = &dc_ed_pec_regmap_volatile_table,
+ .max_register = PIXENGCFG_TRIGGER,
+};
+
+static const struct regmap_range dc_ed_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, STATICCONTROL),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_ed_regmap_access_table = {
+ .yes_ranges = dc_ed_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_regmap_ranges),
+};
+
+static const struct regmap_config dc_ed_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_regmap_access_table,
+ .rd_table = &dc_ed_regmap_access_table,
+ .max_register = CONTROL,
+};
+
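+/* Pixel engine sources that are allowed to feed an ExtDst unit. */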
+static const enum dc_link_id src_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ LINK_ID_LAYERBLEND3,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND0,
+};
+
+static inline void dc_ed_pec_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_pec_poweron(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, POWERDOWN, 0);
+}
+
+static inline void dc_ed_pec_sync_mode_single(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SYNC_MODE, SINGLE);
+}
+
+static inline void dc_ed_pec_div_reset(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, DIV_MASK,
+ DIV(DIV_RESET));
+}
+
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(src_sels); i++) {
+ if (src_sels[i] == src) {
+ regmap_write(ed->reg_pec, PIXENGCFG_DYNAMIC, src);
+ return;
+ }
+ }
+}
+
+void dc_ed_pec_sync_trigger(struct dc_ed *ed)
+{
+ regmap_write(ed->reg_pec, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
+}
+
+static inline void dc_ed_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_kick_mode_external(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, KICK_MODE, EXTERNAL);
+}
+
+static inline void dc_ed_disable_perfcountmode(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, PERFCOUNTMODE, 0);
+}
+
+static inline void dc_ed_disable_gamma_apply(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, CONTROL, GAMMAAPPLYENABLE, 0);
+}
+
+void dc_ed_init(struct dc_ed *ed)
+{
+ dc_ed_pec_src_sel(ed, LINK_ID_NONE);
+ dc_ed_pec_enable_shden(ed);
+ dc_ed_pec_poweron(ed);
+ dc_ed_pec_sync_mode_single(ed);
+ dc_ed_pec_div_reset(ed);
+ dc_ed_enable_shden(ed);
+ dc_ed_disable_perfcountmode(ed);
+ dc_ed_kick_mode_external(ed);
+ dc_ed_disable_gamma_apply(ed);
+}
+
+static int dc_ed_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_ed *ed;
+ int id;
+
+ ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ ed->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_ed_pec_regmap_config);
+ if (IS_ERR(ed->reg_pec))
+ return PTR_ERR(ed->reg_pec);
+
+ ed->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_ed_cfg_regmap_config);
+ if (IS_ERR(ed->reg_cfg))
+ return PTR_ERR(ed->reg_cfg);
+
+ ed->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (ed->irq_shdload < 0)
+ return ed->irq_shdload;
+
+ ed->dev = dev;
+
+ id = dc_subdev_get_id(dc_ed_info, ARRAY_SIZE(dc_ed_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
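+	/* instances 0/1 serve the content streams; 4/5 serve the safety streams */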
+ switch (id) {
+ case 0:
+ dc_drm->ed_cont[0] = ed;
+ break;
+ case 1:
+ dc_drm->ed_cont[1] = ed;
+ break;
+ case 4:
+ dc_drm->ed_safe[0] = ed;
+ break;
+ case 5:
+ dc_drm->ed_safe[1] = ed;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_ed_ops = {
+ .bind = dc_ed_bind,
+};
+
+static int dc_ed_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_ed_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_ed_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_ed_ops);
+}
+
+static const struct of_device_id dc_ed_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-extdst" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_ed_dt_ids);
+
+struct platform_driver dc_ed_driver = {
+ .probe = dc_ed_probe,
+ .remove = dc_ed_remove,
+ .driver = {
+ .name = "imx8-dc-extdst",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ed_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
new file mode 100644
index 000000000000..7f6c1852bf72
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <drm/drm_modes.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define FGSTCTRL 0x8
+#define FGSYNCMODE_MASK GENMASK(2, 1)
+#define FGSYNCMODE(x) FIELD_PREP(FGSYNCMODE_MASK, (x))
+#define SHDEN BIT(0)
+
+#define HTCFG1 0xc
+#define HTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define HTCFG2 0x10
+#define HSEN BIT(31)
+#define HSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define VTCFG1 0x14
+#define VTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define VTCFG2 0x18
+#define VSEN BIT(31)
+#define VSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define PKICKCONFIG 0x2c
+#define SKICKCONFIG 0x30
+#define EN BIT(31)
+#define ROW(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define COL(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define PACFG 0x54
+#define SACFG 0x58
+#define STARTY(x) FIELD_PREP(GENMASK(29, 16), ((x) + 1))
+#define STARTX(x) FIELD_PREP(GENMASK(13, 0), ((x) + 1))
+
+#define FGINCTRL 0x5c
+#define FGINCTRLPANIC 0x60
+#define FGDM_MASK GENMASK(2, 0)
+#define ENPRIMALPHA BIT(3)
+#define ENSECALPHA BIT(4)
+
+#define FGCCR 0x64
+#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
+
+#define FGENABLE 0x68
+#define FGEN BIT(0)
+
+#define FGSLR 0x6c
+#define SHDTOKGEN BIT(0)
+
+#define FGTIMESTAMP 0x74
+#define FRAMEINDEX(x) FIELD_GET(GENMASK(31, 14), (x))
+#define LINEINDEX(x) FIELD_GET(GENMASK(13, 0), (x))
+
+#define FGCHSTAT 0x78
+#define SECSYNCSTAT BIT(24)
+#define SFIFOEMPTY BIT(16)
+
+#define FGCHSTATCLR 0x7c
+#define CLRSECSTAT BIT(16)
+
+enum dc_fg_syncmode {
+ FG_SYNCMODE_OFF, /* No side-by-side synchronization. */
+};
+
+enum dc_fg_dm {
+ FG_DM_CONSTCOL = 0x1, /* Constant Color Background is shown. */
+ FG_DM_SEC_ON_TOP = 0x5, /* Both inputs overlaid with secondary on top. */
+};
+
+static const struct dc_subdev_info dc_fg_info[] = {
+ { .reg_start = 0x5618b800, .id = 0, },
+ { .reg_start = 0x5618d400, .id = 1, },
+};
+
+static const struct regmap_range dc_fg_regmap_write_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGSLR),
+ regmap_reg_range(FGCHSTATCLR, FGCHSTATCLR),
+};
+
+static const struct regmap_range dc_fg_regmap_read_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGENABLE),
+ regmap_reg_range(FGTIMESTAMP, FGCHSTAT),
+};
+
+static const struct regmap_access_table dc_fg_regmap_write_table = {
+ .yes_ranges = dc_fg_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_write_ranges),
+};
+
+static const struct regmap_access_table dc_fg_regmap_read_table = {
+ .yes_ranges = dc_fg_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_read_ranges),
+};
+
+static const struct regmap_config dc_fg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fg_regmap_write_table,
+ .rd_table = &dc_fg_regmap_read_table,
+ .max_register = FGCHSTATCLR,
+};
+
+static inline void dc_fg_enable_shden(struct dc_fg *fg)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, SHDEN, SHDEN);
+}
+
+static inline void dc_fg_syncmode(struct dc_fg *fg, enum dc_fg_syncmode mode)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, FGSYNCMODE_MASK, FGSYNCMODE(mode));
+}
+
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m)
+{
+ u32 hact, htotal, hsync, hsbp;
+ u32 vact, vtotal, vsync, vsbp;
+ u32 kick_row, kick_col;
+ int ret;
+
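+	/* "sbp" covers the sync pulse plus back porch (total minus sync start) */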
+ hact = m->crtc_hdisplay;
+ htotal = m->crtc_htotal;
+ hsync = m->crtc_hsync_end - m->crtc_hsync_start;
+ hsbp = m->crtc_htotal - m->crtc_hsync_start;
+
+ vact = m->crtc_vdisplay;
+ vtotal = m->crtc_vtotal;
+ vsync = m->crtc_vsync_end - m->crtc_vsync_start;
+ vsbp = m->crtc_vtotal - m->crtc_vsync_start;
+
+ /* video mode */
+ regmap_write(fg->reg, HTCFG1, HACT(hact) | HTOTAL(htotal));
+ regmap_write(fg->reg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
+ regmap_write(fg->reg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
+ regmap_write(fg->reg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
+
+ kick_col = hact + 1;
+ kick_row = vact;
+
+ /* pkickconfig */
+ regmap_write(fg->reg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+	/* skickconfig */
+ regmap_write(fg->reg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* primary and secondary area position configuration */
+ regmap_write(fg->reg, PACFG, STARTX(0) | STARTY(0));
+ regmap_write(fg->reg, SACFG, STARTX(0) | STARTY(0));
+
+ /* alpha */
+ regmap_write_bits(fg->reg, FGINCTRL, ENPRIMALPHA | ENSECALPHA, 0);
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, ENPRIMALPHA | ENSECALPHA, 0);
+
+	/* constant color is green (used in panic mode) */
+ regmap_write(fg->reg, FGCCR, CCGREEN(0x3ff));
+
+ ret = clk_set_rate(fg->clk_disp, m->clock * HZ_PER_KHZ);
+ if (ret < 0)
+ dev_err(fg->dev, "failed to set display clock rate: %d\n", ret);
+}
+
+static inline void dc_fg_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRL, FGDM_MASK, mode);
+}
+
+static inline void dc_fg_panic_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, FGDM_MASK, mode);
+}
+
+void dc_fg_enable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, FGEN);
+}
+
+void dc_fg_disable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, 0);
+}
+
+void dc_fg_shdtokgen(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGSLR, SHDTOKGEN);
+}
+
+u32 dc_fg_get_frame_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return FRAMEINDEX(val);
+}
+
+u32 dc_fg_get_line_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return LINEINDEX(val);
+}
+
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 frame_index, last_frame_index;
+
+ frame_index = dc_fg_get_frame_index(fg);
+ do {
+ last_frame_index = frame_index;
+ frame_index = dc_fg_get_frame_index(fg);
+ } while (last_frame_index == frame_index &&
+ time_before(jiffies, timeout));
+
+ return last_frame_index != frame_index;
+}
+
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGCHSTAT, &val);
+
+ return !!(val & SFIFOEMPTY);
+}
+
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGCHSTATCLR, CLRSECSTAT);
+}
+
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(fg->reg, FGCHSTAT, val,
+ val & SECSYNCSTAT, 5, 100000);
+}
+
+void dc_fg_enable_clock(struct dc_fg *fg)
+{
+ int ret;
+
+ ret = clk_prepare_enable(fg->clk_disp);
+ if (ret)
+ dev_err(fg->dev, "failed to enable display clock: %d\n", ret);
+}
+
+void dc_fg_disable_clock(struct dc_fg *fg)
+{
+ clk_disable_unprepare(fg->clk_disp);
+}
+
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz)
+{
+ unsigned long rounded_rate;
+
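+	/* only accept modes whose pixel clock the display clock can hit exactly */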
+ rounded_rate = clk_round_rate(fg->clk_disp, clk_khz * HZ_PER_KHZ);
+
+ if (rounded_rate != clk_khz * HZ_PER_KHZ)
+ return MODE_NOCLOCK;
+
+ return MODE_OK;
+}
+
+void dc_fg_init(struct dc_fg *fg)
+{
+ dc_fg_enable_shden(fg);
+ dc_fg_syncmode(fg, FG_SYNCMODE_OFF);
+ dc_fg_displaymode(fg, FG_DM_SEC_ON_TOP);
+ dc_fg_panic_displaymode(fg, FG_DM_CONSTCOL);
+}
+
+static int dc_fg_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_fg *fg;
+ int id;
+
+ fg = devm_kzalloc(dev, sizeof(*fg), GFP_KERNEL);
+ if (!fg)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ fg->reg = devm_regmap_init_mmio(dev, base, &dc_fg_regmap_config);
+ if (IS_ERR(fg->reg))
+ return PTR_ERR(fg->reg);
+
+ fg->clk_disp = devm_clk_get(dev, NULL);
+ if (IS_ERR(fg->clk_disp))
+ return dev_err_probe(dev, PTR_ERR(fg->clk_disp),
+ "failed to get display clock\n");
+
+ id = dc_subdev_get_id(dc_fg_info, ARRAY_SIZE(dc_fg_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fg->dev = dev;
+ dc_drm->fg[id] = fg;
+
+ return 0;
+}
+
+static const struct component_ops dc_fg_ops = {
+ .bind = dc_fg_bind,
+};
+
+static int dc_fg_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fg_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fg_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fg_ops);
+}
+
+static const struct of_device_id dc_fg_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-framegen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fg_dt_ids);
+
+struct platform_driver dc_fg_driver = {
+ .probe = dc_fg_probe,
+ .remove = dc_fg_remove,
+ .driver = {
+ .name = "imx8-dc-framegen",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fg_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fl.c b/drivers/gpu/drm/imx/dc/dc-fl.c
new file mode 100644
index 000000000000..3ce24c72aa13
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fl.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+
+struct dc_fl {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fl_info[] = {
+ { .reg_start = 0x56180ac0, .id = 0, },
+};
+
+static const struct regmap_range dc_fl_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+};
+
+static const struct regmap_access_table dc_fl_regmap_access_table = {
+ .yes_ranges = dc_fl_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fl_regmap_ranges),
+};
+
+static const struct regmap_config dc_fl_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fl_regmap_access_table,
+ .rd_table = &dc_fl_regmap_access_table,
+ .max_register = FRAMEDIMENSIONS,
+};
+
+static void dc_fl_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fl_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fl_init(struct dc_fu *fu)
+{
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fl_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fl_init;
+ fu->ops.set_fmt = dc_fl_set_fmt;
+ fu->ops.set_framedimensions = dc_fl_set_framedimensions;
+}
+
+static int dc_fl_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_fl *fl;
+ struct dc_fu *fu;
+ int i, id;
+
+ fl = devm_kzalloc(dev, sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ fu = &fl->fu;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fl_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fl_info, ARRAY_SIZE(dc_fl_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHLAYER0;
+ fu->id = DC_FETCHUNIT_FL0;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchLayer%d", id);
+
+ dc_fl_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fl_ops = {
+ .bind = dc_fl_bind,
+};
+
+static int dc_fl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fl_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fl_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fl_ops);
+}
+
+static const struct of_device_id dc_fl_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchlayer" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fl_dt_ids);
+
+struct platform_driver dc_fl_driver = {
+ .probe = dc_fl_probe,
+ .remove = dc_fl_remove,
+ .driver = {
+ .name = "imx8-dc-fetchlayer",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fl_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
new file mode 100644
index 000000000000..f94c591c8158
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/math.h>
+
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+/* STATICCONTROL */
+#define SHDLDREQSTICKY_MASK GENMASK(31, 24)
+#define SHDLDREQSTICKY(x) FIELD_PREP(SHDLDREQSTICKY_MASK, (x))
+#define BASEADDRESSAUTOUPDATE_MASK GENMASK(23, 16)
+#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
+
+/* BURSTBUFFERMANAGEMENT */
+#define SETBURSTLENGTH_MASK GENMASK(12, 8)
+#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
+#define SETNUMBUFFERS_MASK GENMASK(7, 0)
+#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
+#define LINEMODE_MASK BIT(31)
+
+/* SOURCEBUFFERATTRIBUTES */
+#define BITSPERPIXEL_MASK GENMASK(21, 16)
+#define BITSPERPIXEL(x) FIELD_PREP(BITSPERPIXEL_MASK, (x))
+#define STRIDE_MASK GENMASK(15, 0)
+#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
+
+/* SOURCEBUFFERDIMENSION */
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* LAYEROFFSET */
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWOFFSET */
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWDIMENSIONS */
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
+#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+
+enum dc_linemode {
+ /*
+ * Mandatory setting for operation in the Display Controller.
+ * Also works for the Blit Engine, with a marginal performance impact.
+ */
+ LINEMODE_DISPLAY = 0,
+};
+
+struct dc_fu_pixel_format {
+ u32 pixel_format;
+ u32 bits;
+ u32 shifts;
+};
+
+static const struct dc_fu_pixel_format pixel_formats[] = {
+ {
+ DRM_FORMAT_XRGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ },
+};
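+
+/*
+ * Illustration: the XRGB8888 entry above describes 8 bits each of R/G/B
+ * at bit offsets 16/8/0 within a 32-bit pixel, with the unused alpha
+ * byte reported as 0 bits wide.
+ */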
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *bits = pixel_formats[i].bits;
+ return;
+ }
+ }
+}
+
+void
+dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *shifts = pixel_formats[i].shifts;
+ return;
+ }
+ }
+}
+
+static inline void dc_fu_enable_shden(struct dc_fu *fu)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_fu_baddr_autoupdate(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL,
+ BASEADDRESSAUTOUPDATE_MASK,
+ BASEADDRESSAUTOUPDATE(layer_mask));
+}
+
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDLDREQSTICKY_MASK,
+ SHDLDREQSTICKY(layer_mask));
+}
+
+static inline void dc_fu_set_linemode(struct dc_fu *fu, enum dc_linemode mode)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT, LINEMODE_MASK,
+ mode);
+}
+
+static inline void dc_fu_set_numbuffers(struct dc_fu *fu, unsigned int num)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETNUMBUFFERS_MASK, SETNUMBUFFERS(num));
+}
+
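+/*
+ * Derive the AXI burst length from the base address alignment: e.g. a
+ * buffer at ...0x4 allows a burst_size of 8 bytes (burst_length 1),
+ * while any 128-byte-aligned buffer gets the maximum burst_size of
+ * 128 bytes (burst_length 16).
+ */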
+static void dc_fu_set_burstlength(struct dc_fu *fu, dma_addr_t baddr)
+{
+ unsigned int burst_size, burst_length;
+
+ burst_size = 1 << __ffs(baddr);
+ burst_size = round_up(burst_size, 8);
+ burst_size = min(burst_size, 128U);
+ burst_length = burst_size / 8;
+
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETBURSTLENGTH_MASK, SETBURSTLENGTH(burst_length));
+}
+
+static void dc_fu_set_baseaddress(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr)
+{
+ regmap_write(fu->reg_cfg, fu->reg_baseaddr[frac], baddr);
+}
+
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ BITSPERPIXEL_MASK, BITSPERPIXEL(bpp));
+}
+
+static void dc_fu_set_src_stride(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ STRIDE_MASK, STRIDE(stride));
+}
+
+static void dc_fu_set_src_buf_dimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_sourcebufferdimension[frac],
+ LINEWIDTH(w) | LINECOUNT(h));
+}
+
+static inline void dc_fu_layeroffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layeroffset[frac],
+ LAYERXOFFSET(x) | LAYERYOFFSET(y));
+}
+
+static inline void dc_fu_clipoffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowoffset[frac],
+ CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y));
+}
+
+static inline void dc_fu_clipdimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int w, unsigned int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowdimensions[frac],
+ CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h));
+}
+
+static inline void
+dc_fu_set_pixel_blend_mode(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layerproperty[frac], 0);
+ regmap_write(fu->reg_cfg, fu->reg_constantcolor[frac], 0);
+}
+
+static void dc_fu_enable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, SOURCEBUFFERENABLE);
+}
+
+static void dc_fu_disable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, 0);
+
+ if (fu->lb) {
+ dc_lb_pec_clken(fu->lb, CLKEN_DISABLE);
+ dc_lb_mode(fu->lb, LB_NEUTRAL);
+ }
+}
+
+static void dc_fu_set_layerblend(struct dc_fu *fu, struct dc_lb *lb)
+{
+ fu->lb = lb;
+}
+
+static enum dc_link_id dc_fu_get_link_id(struct dc_fu *fu)
+{
+ return fu->link_id;
+}
+
+static const char *dc_fu_get_name(struct dc_fu *fu)
+{
+ return fu->name;
+}
+
+const struct dc_fu_ops dc_fu_common_ops = {
+ .set_burstlength = dc_fu_set_burstlength,
+ .set_baseaddress = dc_fu_set_baseaddress,
+ .set_src_stride = dc_fu_set_src_stride,
+ .set_src_buf_dimensions = dc_fu_set_src_buf_dimensions,
+ .enable_src_buf = dc_fu_enable_src_buf,
+ .disable_src_buf = dc_fu_disable_src_buf,
+ .set_layerblend = dc_fu_set_layerblend,
+ .get_link_id = dc_fu_get_link_id,
+ .get_name = dc_fu_get_name,
+};
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu)
+{
+ return &fu->ops;
+}
+
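+/*
+ * Bring the fetchunit to a known state: shadow loading enabled, display
+ * line mode, 16 burst buffers, and every fraction parked at offset 0
+ * with a 1x1 clip window, a disabled source buffer and neutral blending.
+ */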
+void dc_fu_common_hw_init(struct dc_fu *fu)
+{
+ enum dc_fu_frac i;
+
+ dc_fu_baddr_autoupdate(fu, 0x0);
+ dc_fu_enable_shden(fu);
+ dc_fu_set_linemode(fu, LINEMODE_DISPLAY);
+ dc_fu_set_numbuffers(fu, 16);
+
+ for (i = DC_FETCHUNIT_FRAC0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ dc_fu_layeroffset(fu, i, 0, 0);
+ dc_fu_clipoffset(fu, i, 0, 0);
+ dc_fu_clipdimensions(fu, i, 1, 1);
+ dc_fu_disable_src_buf(fu, i);
+ dc_fu_set_pixel_blend_mode(fu, i);
+ }
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
new file mode 100644
index 000000000000..e016e1ea5b4e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_FETCHUNIT_H__
+#define __DC_FETCHUNIT_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-pe.h"
+
+#define FRAC_OFFSET 0x28
+
+#define STATICCONTROL 0x8
+#define BURSTBUFFERMANAGEMENT 0xc
+
+/* COLORCOMPONENTBITS */
+#define R_BITS(x) FIELD_PREP_CONST(GENMASK(27, 24), (x))
+#define G_BITS(x) FIELD_PREP_CONST(GENMASK(19, 16), (x))
+#define B_BITS(x) FIELD_PREP_CONST(GENMASK(11, 8), (x))
+#define A_BITS(x) FIELD_PREP_CONST(GENMASK(3, 0), (x))
+
+/* COLORCOMPONENTSHIFT */
+#define R_SHIFT(x) FIELD_PREP_CONST(GENMASK(28, 24), (x))
+#define G_SHIFT(x) FIELD_PREP_CONST(GENMASK(20, 16), (x))
+#define B_SHIFT(x) FIELD_PREP_CONST(GENMASK(12, 8), (x))
+#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
+
+/* LAYERPROPERTY */
+#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
+#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
+#define SOURCEBUFFERENABLE BIT(31)
+
+/* FRAMEDIMENSIONS */
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* CONTROL */
+#define INPUTSELECT_MASK GENMASK(4, 3)
+#define INPUTSELECT(x) FIELD_PREP(INPUTSELECT_MASK, (x))
+#define RASTERMODE_MASK GENMASK(2, 0)
+#define RASTERMODE(x) FIELD_PREP(RASTERMODE_MASK, (x))
+
+enum dc_yuvconversionmode {
+ YUVCONVERSIONMODE_OFF,
+};
+
+enum dc_inputselect {
+ INPUTSELECT_INACTIVE,
+};
+
+enum dc_rastermode {
+ RASTERMODE_NORMAL,
+};
+
+enum {
+ DC_FETCHUNIT_FL0,
+ DC_FETCHUNIT_FW2,
+};
+
+enum dc_fu_frac {
+ DC_FETCHUNIT_FRAC0,
+ DC_FETCHUNIT_FRAC1,
+ DC_FETCHUNIT_FRAC2,
+ DC_FETCHUNIT_FRAC3,
+ DC_FETCHUNIT_FRAC4,
+ DC_FETCHUNIT_FRAC5,
+ DC_FETCHUNIT_FRAC6,
+ DC_FETCHUNIT_FRAC7,
+ DC_FETCHUNIT_FRAC_NUM
+};
+
+struct dc_fu;
+struct dc_lb;
+
+struct dc_fu_ops {
+ void (*init)(struct dc_fu *fu);
+ void (*set_burstlength)(struct dc_fu *fu, dma_addr_t baddr);
+ void (*set_baseaddress)(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr);
+ void (*set_src_stride)(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride);
+ void (*set_src_buf_dimensions)(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h);
+ void (*set_fmt)(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format);
+ void (*enable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*disable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*set_framedimensions)(struct dc_fu *fu, int w, int h);
+ void (*set_layerblend)(struct dc_fu *fu, struct dc_lb *lb);
+ enum dc_link_id (*get_link_id)(struct dc_fu *fu);
+ const char *(*get_name)(struct dc_fu *fu);
+};
+
+struct dc_fu {
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ char name[21];
+ u32 reg_baseaddr[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferattributes[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferdimension[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layeroffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowoffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowdimensions[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_constantcolor[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layerproperty[DC_FETCHUNIT_FRAC_NUM];
+ unsigned int id;
+ enum dc_link_id link_id;
+ struct dc_fu_ops ops;
+ struct dc_lb *lb;
+};
+
+extern const struct dc_fu_ops dc_fu_common_ops;
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits);
+void dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts);
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask);
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp);
+void dc_fu_common_hw_init(struct dc_fu *fu);
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu);
+
+#endif /* __DC_FETCHUNIT_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-fw.c b/drivers/gpu/drm/imx/dc/dc-fw.c
new file mode 100644
index 000000000000..acb2d4d9e2ec
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+#define CONTROL 0x170
+
+struct dc_fw {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fw_info[] = {
+ { .reg_start = 0x56180a60, .id = 2, },
+};
+
+static const struct regmap_range dc_fw_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_fw_pec_regmap_access_table = {
+ .yes_ranges = dc_fw_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_fw_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_pec_regmap_access_table,
+ .rd_table = &dc_fw_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_fw_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_fw_regmap_access_table = {
+ .yes_ranges = dc_fw_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_regmap_ranges),
+};
+
+static const struct regmap_config dc_fw_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_regmap_access_table,
+ .rd_table = &dc_fw_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static void dc_fw_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, CONTROL, INPUTSELECT_MASK,
+ INPUTSELECT(INPUTSELECT_INACTIVE));
+ regmap_write_bits(fu->reg_cfg, CONTROL, RASTERMODE_MASK,
+ RASTERMODE(RASTERMODE_NORMAL));
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fw_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
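+/*
+ * Unlike the fetchlayer, the fetchwarp has a pixengcfg DYNAMIC register,
+ * so park its input selection on LINK_ID_NONE before the common init.
+ */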
+static void dc_fw_init(struct dc_fu *fu)
+{
+ regmap_write(fu->reg_pec, PIXENGCFG_DYNAMIC, LINK_ID_NONE);
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fw_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fw_init;
+ fu->ops.set_fmt = dc_fw_set_fmt;
+ fu->ops.set_framedimensions = dc_fw_set_framedimensions;
+}
+
+static int dc_fw_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_fw *fw;
+ struct dc_fu *fu;
+ int i, id;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fu = &fw->fu;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_fw_pec_regmap_config);
+ if (IS_ERR(fu->reg_pec))
+ return PTR_ERR(fu->reg_pec);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fw_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fw_info, ARRAY_SIZE(dc_fw_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHWARP2;
+ fu->id = DC_FETCHUNIT_FW2;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchWarp%d", id);
+
+ dc_fw_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fw_ops = {
+ .bind = dc_fw_bind,
+};
+
+static int dc_fw_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fw_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fw_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fw_ops);
+}
+
+static const struct of_device_id dc_fw_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchwarp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fw_dt_ids);
+
+struct platform_driver dc_fw_driver = {
+ .probe = dc_fw_probe,
+ .remove = dc_fw_remove,
+ .driver = {
+ .name = "imx8-dc-fetchwarp",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fw_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-ic.c b/drivers/gpu/drm/imx/dc/dc-ic.c
new file mode 100644
index 000000000000..a270ae4030cd
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ic.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define USERINTERRUPTMASK(n) (0x8 + 4 * (n))
+#define INTERRUPTENABLE(n) (0x10 + 4 * (n))
+#define INTERRUPTPRESET(n) (0x18 + 4 * (n))
+#define INTERRUPTCLEAR(n) (0x20 + 4 * (n))
+#define INTERRUPTSTATUS(n) (0x28 + 4 * (n))
+#define USERINTERRUPTENABLE(n) (0x40 + 4 * (n))
+#define USERINTERRUPTPRESET(n) (0x48 + 4 * (n))
+#define USERINTERRUPTCLEAR(n) (0x50 + 4 * (n))
+#define USERINTERRUPTSTATUS(n) (0x58 + 4 * (n))
+
+#define IRQ_COUNT 49
+#define IRQ_RESERVED 35
+#define REG_NUM 2
+
+struct dc_ic_data {
+ struct regmap *regs;
+ struct clk *clk_axi;
+ int irq[IRQ_COUNT];
+ struct irq_domain *domain;
+};
+
+struct dc_ic_entry {
+ struct dc_ic_data *data;
+ int irq;
+};
+
+static const struct regmap_range dc_ic_regmap_write_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_write_table = {
+ .yes_ranges = dc_ic_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_read_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTENABLE(1)),
+ regmap_reg_range(INTERRUPTSTATUS(0), INTERRUPTSTATUS(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTENABLE(1)),
+ regmap_reg_range(USERINTERRUPTSTATUS(0), USERINTERRUPTSTATUS(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_read_table = {
+ .yes_ranges = dc_ic_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_volatile_ranges[] = {
+ regmap_reg_range(INTERRUPTPRESET(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTPRESET(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_volatile_table = {
+ .yes_ranges = dc_ic_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ic_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ic_regmap_write_table,
+ .rd_table = &dc_ic_regmap_read_table,
+ .volatile_table = &dc_ic_regmap_volatile_table,
+ .max_register = USERINTERRUPTSTATUS(1),
+};
+
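+/*
+ * Each chained handler demuxes exactly one user interrupt: status and
+ * enable bits live in banks of 32 per register, so e.g. IRQ 40 is bit 8
+ * of USERINTERRUPTSTATUS(1)/USERINTERRUPTENABLE(1).
+ */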
+static void dc_ic_irq_handler(struct irq_desc *desc)
+{
+ struct dc_ic_entry *entry = irq_desc_get_handler_data(desc);
+ struct dc_ic_data *data = entry->data;
+ unsigned int status, enable;
+ unsigned int virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ regmap_read(data->regs, USERINTERRUPTSTATUS(entry->irq / 32), &status);
+ regmap_read(data->regs, USERINTERRUPTENABLE(entry->irq / 32), &enable);
+
+ status &= enable;
+
+ if (status & BIT(entry->irq % 32)) {
+ virq = irq_find_mapping(data->domain, entry->irq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
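+/*
+ * Bits set in the second mask cover the reserved IRQ 35 (bit 3) and the
+ * nonexistent IRQs 49-63 (bits 17-31): 0x8 | 0xfffe0000 = 0xfffe0008.
+ */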
+static const unsigned long unused_irq[REG_NUM] = {0x00000000, 0xfffe0008};
+
+static int dc_ic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct irq_chip_generic *gc;
+ struct dc_ic_entry *entry;
+ struct irq_chip_type *ct;
+ struct dc_ic_data *data;
+ void __iomem *base;
+ int i, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ entry = devm_kcalloc(dev, IRQ_COUNT, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to initialize reg\n");
+ return PTR_ERR(base);
+ }
+
+ data->regs = devm_regmap_init_mmio(dev, base, &dc_ic_regmap_config);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ data->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(data->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(data->clk_axi),
+ "failed to get AXI clock\n");
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_set_drvdata(dev, data);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM sync: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < REG_NUM; i++) {
+ /* mask and clear all interrupts */
+ regmap_write(data->regs, USERINTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, INTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, USERINTERRUPTCLEAR(i), 0xffffffff);
+ regmap_write(data->regs, INTERRUPTCLEAR(i), 0xffffffff);
+
+ /* set all interrupts to user mode */
+ regmap_write(data->regs, USERINTERRUPTMASK(i), 0xffffffff);
+ }
+
+ data->domain = irq_domain_add_linear(dev->of_node, IRQ_COUNT,
+ &irq_generic_chip_ops, data);
+ if (!data->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ pm_runtime_put(dev);
+ return -ENOMEM;
+ }
+ irq_domain_set_pm_device(data->domain, dev);
+
+ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, "DC",
+ handle_level_irq, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed to alloc generic IRQ chips: %d\n", ret);
+ irq_domain_remove(data->domain);
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ for (i = 0; i < IRQ_COUNT; i += 32) {
+ gc = irq_get_domain_generic_chip(data->domain, i);
+ gc->reg_base = base;
+ gc->unused = unused_irq[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = USERINTERRUPTCLEAR(i / 32);
+ ct->regs.mask = USERINTERRUPTENABLE(i / 32);
+ }
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ data->irq[i] = irq_of_parse_and_map(dev->of_node, i);
+
+ entry[i].data = data;
+ entry[i].irq = i;
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ dc_ic_irq_handler, &entry[i]);
+ }
+
+ return 0;
+}
+
+static void dc_ic_remove(struct platform_device *pdev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ if (i == IRQ_RESERVED)
+ continue;
+
+ irq_set_chained_handler_and_data(data->irq[i], NULL, NULL);
+ }
+
+ irq_domain_remove(data->domain);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static int dc_ic_runtime_suspend(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->clk_axi);
+
+ return 0;
+}
+
+static int dc_ic_runtime_resume(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk_axi);
+ if (ret)
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dc_ic_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dc_ic_runtime_suspend, dc_ic_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_ic_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-intc", },
+ { /* sentinel */ }
+};
+
+struct platform_driver dc_ic_driver = {
+ .probe = dc_ic_probe,
+ .remove = dc_ic_remove,
+ .driver = {
+ .name = "imx8-dc-intc",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ic_dt_ids,
+ .pm = pm_sleep_ptr(&dc_ic_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.c b/drivers/gpu/drm/imx/dc/dc-kms.c
new file mode 100644
index 000000000000..2b18aa37a4a8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+
+static const struct drm_mode_config_funcs dc_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int dc_kms_init_encoder_per_crtc(struct dc_drm_device *dc_drm,
+ int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ struct drm_connector *connector;
+ struct device *dev = drm->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_drm_of_get_bridge(dev, dc_crtc->de->tc->dev->of_node,
+ 0, 0);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
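+ /* -ENODEV means no bridge is connected to this CRTC's port */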
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret,
+ "failed to find bridge for CRTC%u\n",
+ crtc->index);
+ }
+
+ encoder = &dc_drm->encoder[crtc_index];
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+ if (ret) {
+ dev_err(dev, "failed to initialize encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dev,
+ "failed to attach bridge to encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev, "failed to init bridge connector for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ dev_err(dev,
+ "failed to attach encoder to connector for CRTC%u: %d\n",
+ crtc->index, ret);
+
+ return ret;
+}
+
+int dc_kms_init(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret, i;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ drm->mode_config.min_width = 60;
+ drm->mode_config.min_height = 60;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &dc_drm_mode_config_funcs;
+
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = DC_FRAMEGEN_MAX_FRAME_INDEX;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_init(dc_drm, i);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init_encoder_per_crtc(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_post_init(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, DC_DISPLAYS);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank support: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+void dc_kms_uninit(struct dc_drm_device *dc_drm)
+{
+ drm_kms_helper_poll_fini(&dc_drm->base);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.h b/drivers/gpu/drm/imx/dc/dc-kms.h
new file mode 100644
index 000000000000..cd7860eff986
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_KMS_H__
+#define __DC_KMS_H__
+
+#include <linux/completion.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+#define DC_CRTC_IRQS 5
+
+struct dc_crtc_irq {
+ struct dc_crtc *dc_crtc;
+ unsigned int irq;
+};
+
+/**
+ * struct dc_crtc - DC specific drm_crtc
+ *
+ * Each display controller contains one content stream and one safety stream.
+ * In general, the two streams have the same functionality. One stream is
+ * overlaid on the other by @fg. This driver chooses to generate a black
+ * constant color from the content stream as the background color, to build
+ * plane(s) on the content stream by using layerblend(s), and to always
+ * generate a constant color from the safety stream. Note that due to the
+ * decoupled timing, the safety stream keeps showing its constant color
+ * properly even if the content stream hangs completely due to a malfunction
+ * of this driver.
+ */
+struct dc_crtc {
+ /** @base: base drm_crtc structure */
+ struct drm_crtc base;
+ /** @de: display engine */
+ struct dc_de *de;
+ /** @cf_cont: content stream constframe */
+ struct dc_cf *cf_cont;
+ /** @cf_safe: safety stream constframe */
+ struct dc_cf *cf_safe;
+ /** @ed_cont: content stream extdst */
+ struct dc_ed *ed_cont;
+ /** @ed_safe: safety stream extdst */
+ struct dc_ed *ed_safe;
+ /** @fg: framegen */
+ struct dc_fg *fg;
+ /**
+ * @irq_dec_framecomplete:
+ *
+ * display engine configuration frame complete interrupt
+ */
+ unsigned int irq_dec_framecomplete;
+ /**
+ * @irq_dec_seqcomplete:
+ *
+ * display engine configuration sequence complete interrupt
+ */
+ unsigned int irq_dec_seqcomplete;
+ /**
+ * @irq_dec_shdload:
+ *
+ * display engine configuration shadow load interrupt
+ */
+ unsigned int irq_dec_shdload;
+ /**
+ * @irq_ed_cont_shdload:
+ *
+ * content stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_cont_shdload;
+ /**
+ * @irq_ed_safe_shdload:
+ *
+ * safety stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_safe_shdload;
+ /**
+ * @dec_seqcomplete_done:
+ *
+ * display engine configuration sequence completion
+ */
+ struct completion dec_seqcomplete_done;
+ /**
+ * @dec_shdload_done:
+ *
+ * display engine configuration shadow load completion
+ */
+ struct completion dec_shdload_done;
+ /**
+ * @ed_cont_shdload_done:
+ *
+ * content stream extdst shadow load completion
+ */
+ struct completion ed_cont_shdload_done;
+ /**
+ * @ed_safe_shdload_done:
+ *
+ * safety stream extdst shadow load completion
+ */
+ struct completion ed_safe_shdload_done;
+ /** @event: cached pending vblank event */
+ struct drm_pending_vblank_event *event;
+ /** @irqs: interrupt list */
+ struct dc_crtc_irq irqs[DC_CRTC_IRQS];
+};
+
+/**
+ * struct dc_plane - DC specific drm_plane
+ *
+ * Build a plane on content stream with a fetchunit and a layerblend.
+ */
+struct dc_plane {
+ /** @base: base drm_plane structure */
+ struct drm_plane base;
+ /** @fu: fetchunit */
+ struct dc_fu *fu;
+ /** @cf: content stream constframe */
+ struct dc_cf *cf;
+ /** @lb: layerblend */
+ struct dc_lb *lb;
+ /** @ed: content stream extdst */
+ struct dc_ed *ed;
+};
+
+#endif /* __DC_KMS_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
new file mode 100644
index 000000000000..38f966625d38
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_blend.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
+#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+
+#define STATICCONTROL 0x8
+#define SHDTOKSEL_MASK GENMASK(4, 3)
+#define SHDTOKSEL(x) FIELD_PREP(SHDTOKSEL_MASK, (x))
+#define SHDLDSEL_MASK GENMASK(2, 1)
+#define SHDLDSEL(x) FIELD_PREP(SHDLDSEL_MASK, (x))
+
+#define CONTROL 0xc
+#define CTRL_MODE_MASK BIT(0)
+#define CTRL_MODE(x) FIELD_PREP(CTRL_MODE_MASK, (x))
+
+#define BLENDCONTROL 0x10
+#define ALPHA_MASK GENMASK(23, 16)
+#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
+#define SEC_A_BLD_FUNC(x) \
+ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+
+#define POSITION 0x14
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
+#define YPOS_MASK GENMASK(31, 16)
+#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+
+enum dc_lb_blend_func {
+ DC_LAYERBLEND_BLEND_ZERO,
+ DC_LAYERBLEND_BLEND_ONE,
+ DC_LAYERBLEND_BLEND_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_CONST_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_CONST_ALPHA,
+};
+
+enum dc_lb_shadow_sel {
+ BOTH = 0x2,
+};
+
+static const struct dc_subdev_info dc_lb_info[] = {
+ { .reg_start = 0x56180ba0, .id = 0, },
+ { .reg_start = 0x56180bc0, .id = 1, },
+ { .reg_start = 0x56180be0, .id = 2, },
+ { .reg_start = 0x56180c00, .id = 3, },
+};
+
+static const struct regmap_range dc_lb_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_lb_pec_regmap_access_table = {
+ .yes_ranges = dc_lb_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_lb_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_pec_regmap_access_table,
+ .rd_table = &dc_lb_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_lb_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, POSITION),
+};
+
+static const struct regmap_access_table dc_lb_regmap_access_table = {
+ .yes_ranges = dc_lb_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_regmap_ranges),
+};
+
+static const struct regmap_config dc_lb_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_regmap_access_table,
+ .rd_table = &dc_lb_regmap_access_table,
+ .max_register = POSITION,
+};
+
+static const enum dc_link_id prim_sels[] = {
+ /* common options */
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ /*
+ * special options:
+ * layerblend(n) has n special options,
+ * layerblend0 through layerblend(n - 1); e.g.,
+ * layerblend3 has 3 special options:
+ * layerblend0/1/2.
+ */
+ LINK_ID_LAYERBLEND0,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND3,
+};
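+
+/*
+ * For instance, layerblend2 accepts the five common options above plus
+ * layerblend0/1 as its primary input; dc_lb_pec_dynamic_prim_sel()
+ * enforces this by walking only the first fixed_sels_num + lb->id
+ * entries.
+ */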
+
+static const enum dc_link_id sec_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_FETCHWARP2,
+ LINK_ID_FETCHLAYER0,
+};
+
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb)
+{
+ return lb->link;
+}
+
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim)
+{
+ int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
+ int i;
+
+ for (i = 0; i < fixed_sels_num + lb->id; i++) {
+ if (prim_sels[i] == prim) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_PRIM_SEL_MASK,
+ PIXENGCFG_DYNAMIC_PRIM_SEL(prim));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid primary input selection:%d\n", prim);
+}
+
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_sels); i++) {
+ if (sec_sels[i] == sec) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_SEC_SEL_MASK,
+ PIXENGCFG_DYNAMIC_SEC_SEL(sec));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid secondary input selection:%d\n", sec);
+}
+
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken)
+{
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC, CLKEN_MASK,
+ CLKEN(clken));
+}
+
+static inline void dc_lb_enable_shden(struct dc_lb *lb)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_lb_shdtoksel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDTOKSEL_MASK,
+ SHDTOKSEL(sel));
+}
+
+static inline void dc_lb_shdldsel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDLDSEL_MASK,
+ SHDLDSEL(sel));
+}
+
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode)
+{
+ regmap_write_bits(lb->reg_cfg, CONTROL, CTRL_MODE_MASK, mode);
+}
+
+static inline void dc_lb_blendcontrol(struct dc_lb *lb)
+{
+ u32 val = PRIM_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ PRIM_C_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_C_BLD_FUNC(DC_LAYERBLEND_BLEND_CONST_ALPHA) |
+ ALPHA(DRM_BLEND_ALPHA_OPAQUE >> 8);
+
+ regmap_write(lb->reg_cfg, BLENDCONTROL, val);
+}
+
+void dc_lb_position(struct dc_lb *lb, int x, int y)
+{
+ regmap_write(lb->reg_cfg, POSITION, XPOS(x) | YPOS(y));
+}
+
+int dc_lb_get_id(struct dc_lb *lb)
+{
+ return lb->id;
+}
+
+void dc_lb_init(struct dc_lb *lb)
+{
+ dc_lb_pec_dynamic_prim_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_dynamic_sec_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_clken(lb, CLKEN_DISABLE);
+ dc_lb_shdldsel(lb, BOTH);
+ dc_lb_shdtoksel(lb, BOTH);
+ dc_lb_blendcontrol(lb);
+ dc_lb_enable_shden(lb);
+}
+
+static int dc_lb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_lb *lb;
+
+ lb = devm_kzalloc(dev, sizeof(*lb), GFP_KERNEL);
+ if (!lb)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ lb->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_lb_pec_regmap_config);
+ if (IS_ERR(lb->reg_pec))
+ return PTR_ERR(lb->reg_pec);
+
+ lb->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_lb_cfg_regmap_config);
+ if (IS_ERR(lb->reg_cfg))
+ return PTR_ERR(lb->reg_cfg);
+
+ lb->id = dc_subdev_get_id(dc_lb_info, ARRAY_SIZE(dc_lb_info), res_pec);
+ if (lb->id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", lb->id);
+ return lb->id;
+ }
+
+ lb->dev = dev;
+ lb->link = LINK_ID_LAYERBLEND0 + lb->id;
+
+ dc_drm->lb[lb->id] = lb;
+
+ return 0;
+}
+
+static const struct component_ops dc_lb_ops = {
+ .bind = dc_lb_bind,
+};
+
+static int dc_lb_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_lb_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_lb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_lb_ops);
+}
+
+static const struct of_device_id dc_lb_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-layerblend" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_lb_dt_ids);
+
+struct platform_driver dc_lb_driver = {
+ .probe = dc_lb_probe,
+ .remove = dc_lb_remove,
+ .driver = {
+ .name = "imx8-dc-layerblend",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_lb_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.c b/drivers/gpu/drm/imx/dc/dc-pe.c
new file mode 100644
index 000000000000..6676c22f3f45
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+static int dc_pe_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dc_drm_device *dc_drm = data;
+ struct dc_pe *pe;
+ int ret;
+
+ pe = devm_kzalloc(dev, sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ pe->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(pe->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(pe->clk_axi),
+ "failed to get AXI clock\n");
+
+ pe->dev = dev;
+
+ dev_set_drvdata(dev, pe);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ dc_drm->pe = pe;
+
+ return 0;
+}
+
+/*
+ * It's possible to get the child device pointers from the child component
+ * bind callbacks, but that would rely on the component helper binding the
+ * pixel engine component first. To avoid that ordering dependency, collect
+ * the pointers from dc_drm after all components are bound.
+ */
+void dc_pe_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_pe *pe = dc_drm->pe;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ pe->cf_safe[i] = dc_drm->cf_safe[i];
+ pe->cf_cont[i] = dc_drm->cf_cont[i];
+ pe->ed_safe[i] = dc_drm->ed_safe[i];
+ pe->ed_cont[i] = dc_drm->ed_cont[i];
+ }
+
+ for (i = 0; i < DC_DISP_FU_CNT; i++)
+ pe->fu_disp[i] = dc_drm->fu_disp[i];
+
+ for (i = 0; i < DC_LB_CNT; i++)
+ pe->lb[i] = dc_drm->lb[i];
+}
+
+static const struct component_ops dc_pe_ops = {
+ .bind = dc_pe_bind,
+};
+
+static int dc_pe_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_pe_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_pe_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_pe_ops);
+}
+
+static int dc_pe_runtime_suspend(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pe->clk_axi);
+
+ return 0;
+}
+
+static int dc_pe_runtime_resume(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = clk_prepare_enable(pe->clk_axi);
+ if (ret) {
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_safe); i++)
+ dc_cf_init(pe->cf_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_cont); i++)
+ dc_cf_init(pe->cf_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_safe); i++)
+ dc_ed_init(pe->ed_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_cont); i++)
+ dc_ed_init(pe->ed_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->fu_disp); i++)
+ pe->fu_disp[i]->ops.init(pe->fu_disp[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->lb); i++)
+ dc_lb_init(pe->lb[i]);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_pe_pm_ops = {
+ RUNTIME_PM_OPS(dc_pe_runtime_suspend, dc_pe_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_pe_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-pixel-engine", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pe_dt_ids);
+
+struct platform_driver dc_pe_driver = {
+ .probe = dc_pe_probe,
+ .remove = dc_pe_remove,
+ .driver = {
+ .name = "imx8-dc-pixel-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_pe_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pe_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.h b/drivers/gpu/drm/imx/dc/dc-pe.h
new file mode 100644
index 000000000000..f5e01a6eb9e9
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_PIXEL_ENGINE_H__
+#define __DC_PIXEL_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+
+#define SHDEN BIT(0)
+
+#define CLKEN_MASK_SHIFT 24
+#define CLKEN_MASK (0x3 << CLKEN_MASK_SHIFT)
+#define CLKEN(n) ((n) << CLKEN_MASK_SHIFT)
+
+#define DC_DISP_FU_CNT 2
+#define DC_LB_CNT 4
+
+enum dc_link_id {
+ LINK_ID_NONE = 0x00,
+ LINK_ID_CONSTFRAME0 = 0x0c,
+ LINK_ID_CONSTFRAME4 = 0x0e,
+ LINK_ID_CONSTFRAME1 = 0x10,
+ LINK_ID_CONSTFRAME5 = 0x12,
+ LINK_ID_FETCHWARP2 = 0x14,
+ LINK_ID_FETCHLAYER0 = 0x1a,
+ LINK_ID_LAYERBLEND0 = 0x21,
+ LINK_ID_LAYERBLEND1 = 0x22,
+ LINK_ID_LAYERBLEND2 = 0x23,
+ LINK_ID_LAYERBLEND3 = 0x24,
+};
+
+enum dc_lb_mode {
+ LB_NEUTRAL, /* Output is same as primary input. */
+ LB_BLEND,
+};
+
+enum dc_pec_clken {
+ CLKEN_DISABLE,
+ CLKEN_AUTOMATIC,
+};
+
+struct dc_cf {
+ struct regmap *reg_cfg;
+ enum dc_link_id link;
+};
+
+struct dc_ed {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int irq_shdload;
+};
+
+struct dc_lb {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int id;
+ enum dc_link_id link;
+};
+
+struct dc_pe {
+ struct device *dev;
+ struct clk *clk_axi;
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ struct dc_lb *lb[DC_LB_CNT];
+};
+
+/* Constant Frame Unit */
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf);
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w, unsigned int h);
+void dc_cf_constantcolor_black(struct dc_cf *cf);
+void dc_cf_constantcolor_blue(struct dc_cf *cf);
+void dc_cf_init(struct dc_cf *cf);
+
+/* External Destination Unit */
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src);
+void dc_ed_pec_sync_trigger(struct dc_ed *ed);
+void dc_ed_init(struct dc_ed *ed);
+
+/* Layer Blend Unit */
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb);
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim);
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec);
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken);
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode);
+void dc_lb_position(struct dc_lb *lb, int x, int y);
+int dc_lb_get_id(struct dc_lb *lb);
+void dc_lb_init(struct dc_lb *lb);
+
+#endif /* __DC_PIXEL_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
new file mode 100644
index 000000000000..d8b946fb90de
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/container_of.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-kms.h"
+
+#define DC_PLANE_MAX_PITCH 0x10000
+#define DC_PLANE_MAX_PIX_CNT 8192
+
+#define dc_plane_dbg(plane, fmt, ...) \
+do { \
+ struct drm_plane *_plane = (plane); \
+ drm_dbg_kms(_plane->dev, "[PLANE:%d:%s] " fmt, \
+ _plane->base.id, _plane->name, ##__VA_ARGS__); \
+} while (0)
+
+static const uint32_t dc_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct drm_plane_funcs dc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct dc_plane *to_dc_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct dc_plane, base);
+}
+
+static int dc_plane_check_max_source_resolution(struct drm_plane_state *state)
+{
+ int src_h = drm_rect_height(&state->src) >> 16;
+ int src_w = drm_rect_width(&state->src) >> 16;
+
+ if (src_w > DC_PLANE_MAX_PIX_CNT || src_h > DC_PLANE_MAX_PIX_CNT) {
+ dc_plane_dbg(state->plane, "invalid source resolution\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dc_plane_check_fb(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ dma_addr_t baseaddr = drm_fb_dma_get_gem_addr(fb, state, 0);
+
+ /* base address alignment */
+ if (baseaddr & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad baddr alignment\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] range */
+ if (fb->pitches[0] > DC_PLANE_MAX_PITCH) {
+ dc_plane_dbg(state->plane, "fb pitches[0] is out of range\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] alignment */
+ if (fb->pitches[0] & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad pitches[0] alignment\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ /* ok to disable */
+ if (!plane_state->fb)
+ return 0;
+
+ if (!plane_state->crtc) {
+ dc_plane_dbg(plane, "no CRTC in plane state\n");
+ return -EINVAL;
+ }
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ if (ret) {
+ dc_plane_dbg(plane, "failed to check plane state: %d\n", ret);
+ return ret;
+ }
+
+ ret = dc_plane_check_max_source_resolution(plane_state);
+ if (ret)
+ return ret;
+
+ return dc_plane_check_fb(plane_state);
+}
+
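+/*
+ * Program the per-plane pipeline in shadow: the fetchunit feeds the
+ * layerblend's secondary input, the content stream constframe feeds its
+ * primary input, and the layerblend output is routed to the extdst.
+ */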
+static void
+dc_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dc_plane *dplane = to_dc_plane(plane);
+ struct drm_framebuffer *fb = new_state->fb;
+ const struct dc_fu_ops *fu_ops;
+ struct dc_lb *lb = dplane->lb;
+ struct dc_fu *fu = dplane->fu;
+ dma_addr_t baseaddr;
+ int src_w, src_h;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ src_w = drm_rect_width(&new_state->src) >> 16;
+ src_h = drm_rect_height(&new_state->src) >> 16;
+
+ baseaddr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ fu_ops = dc_fu_get_ops(dplane->fu);
+
+ fu_ops->set_layerblend(fu, lb);
+ fu_ops->set_burstlength(fu, baseaddr);
+ fu_ops->set_src_stride(fu, DC_FETCHUNIT_FRAC0, fb->pitches[0]);
+ fu_ops->set_src_buf_dimensions(fu, DC_FETCHUNIT_FRAC0, src_w, src_h);
+ fu_ops->set_fmt(fu, DC_FETCHUNIT_FRAC0, fb->format);
+ fu_ops->set_framedimensions(fu, src_w, src_h);
+ fu_ops->set_baseaddress(fu, DC_FETCHUNIT_FRAC0, baseaddr);
+ fu_ops->enable_src_buf(fu, DC_FETCHUNIT_FRAC0);
+
+ dc_plane_dbg(plane, "uses %s\n", fu_ops->get_name(fu));
+
+ dc_lb_pec_dynamic_prim_sel(lb, dc_cf_get_link_id(dplane->cf));
+ dc_lb_pec_dynamic_sec_sel(lb, fu_ops->get_link_id(fu));
+ dc_lb_mode(lb, LB_BLEND);
+ dc_lb_position(lb, new_state->dst.x1, new_state->dst.y1);
+ dc_lb_pec_clken(lb, CLKEN_AUTOMATIC);
+
+ dc_plane_dbg(plane, "uses LayerBlend%d\n", dc_lb_get_id(lb));
+
+ /* set ExtDst's source to LayerBlend */
+ dc_ed_pec_src_sel(dplane->ed, dc_lb_get_link_id(lb));
+
+ drm_dev_exit(idx);
+}
+
+static void dc_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dc_plane *dplane = to_dc_plane(plane);
+ const struct dc_fu_ops *fu_ops;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ /* disable fetchunit in shadow */
+ fu_ops = dc_fu_get_ops(dplane->fu);
+ fu_ops->disable_src_buf(dplane->fu, DC_FETCHUNIT_FRAC0);
+
+ /* set ExtDst's source to ConstFrame */
+ dc_ed_pec_src_sel(dplane->ed, dc_cf_get_link_id(dplane->cf));
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs dc_plane_helper_funcs = {
+ .atomic_check = dc_plane_atomic_check,
+ .atomic_update = dc_plane_atomic_update,
+ .atomic_disable = dc_plane_atomic_disable,
+};
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane)
+{
+ struct drm_plane *plane = &dc_plane->base;
+ int ret;
+
+ ret = drm_universal_plane_init(&dc_drm->base, plane, 0, &dc_plane_funcs,
+ dc_plane_formats,
+ ARRAY_SIZE(dc_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &dc_plane_helper_funcs);
+
+ dc_plane->fu = dc_drm->pe->fu_disp[plane->index];
+ dc_plane->cf = dc_drm->pe->cf_cont[plane->index];
+ dc_plane->lb = dc_drm->pe->lb[plane->index];
+ dc_plane->ed = dc_drm->pe->ed_cont[plane->index];
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-tc.c b/drivers/gpu/drm/imx/dc/dc-tc.c
new file mode 100644
index 000000000000..0bfd381b2cea
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-tc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-de.h"
+
+#define TCON_CTRL 0x410
+#define CTRL_RST_VAL 0x01401408
+
+/* red: MAPBIT 29-20, green: MAPBIT 19-10, blue: MAPBIT 9-0 */
+#define MAPBIT3_0 0x418
+#define MAPBIT7_4 0x41c
+#define MAPBIT11_8 0x420
+#define MAPBIT15_12 0x424
+#define MAPBIT19_16 0x428
+#define MAPBIT23_20 0x42c
+#define MAPBIT27_24 0x430
+#define MAPBIT31_28 0x434
+
+static const struct dc_subdev_info dc_tc_info[] = {
+ { .reg_start = 0x5618c800, .id = 0, },
+ { .reg_start = 0x5618e400, .id = 1, },
+};
+
+static const struct regmap_range dc_tc_regmap_ranges[] = {
+ regmap_reg_range(TCON_CTRL, TCON_CTRL),
+ regmap_reg_range(MAPBIT3_0, MAPBIT31_28),
+};
+
+static const struct regmap_access_table dc_tc_regmap_access_table = {
+ .yes_ranges = dc_tc_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_tc_regmap_ranges),
+};
+
+static const struct regmap_config dc_tc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_tc_regmap_access_table,
+ .rd_table = &dc_tc_regmap_access_table,
+ .max_register = MAPBIT31_28,
+};
+
+/*
+ * The pixels reaching the TCON are always in 30-bit BGR format, while the
+ * first bridge always receives pixels in 30-bit RGB format. So, map the
+ * format to MEDIA_BUS_FMT_RGB101010_1X30.
+ */
+static const u32 dc_tc_mapbit[] = {
+ 0x17161514, 0x1b1a1918, 0x0b0a1d1c, 0x0f0e0d0c,
+ 0x13121110, 0x03020100, 0x07060504, 0x00000908,
+};
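+
+/*
+ * Each byte appears to hold the input bit index routed to the
+ * corresponding output bit (an interpretation of the values above, not
+ * taken from a datasheet): the tables swap bit fields 29:20 and 9:0
+ * while passing green 19:10 through unchanged.
+ */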
+
+void dc_tc_init(struct dc_tc *tc)
+{
+ /* reset TCON_CTRL to POR default so that TCON works in bypass mode */
+ regmap_write(tc->reg, TCON_CTRL, CTRL_RST_VAL);
+
+ /* set format */
+ regmap_bulk_write(tc->reg, MAPBIT3_0, dc_tc_mapbit,
+ ARRAY_SIZE(dc_tc_mapbit));
+}
+
+static int dc_tc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_tc *tc;
+ int id;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tc->reg = devm_regmap_init_mmio(dev, base, &dc_tc_regmap_config);
+ if (IS_ERR(tc->reg))
+ return PTR_ERR(tc->reg);
+
+ id = dc_subdev_get_id(dc_tc_info, ARRAY_SIZE(dc_tc_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ tc->dev = dev;
+ dc_drm->tc[id] = tc;
+
+ return 0;
+}
+
+static const struct component_ops dc_tc_ops = {
+ .bind = dc_tc_bind,
+};
+
+static int dc_tc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_tc_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_tc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_tc_ops);
+}
+
+static const struct of_device_id dc_tc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-tcon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_tc_dt_ids);
+
+struct platform_driver dc_tc_driver = {
+ .probe = dc_tc_probe,
+ .remove = dc_tc_remove,
+ .driver = {
+ .name = "imx8-dc-tcon",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_tc_dt_ids,
+ },
+};
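
The TCON is only a component; an aggregate driver elsewhere collects all the sub-blocks and binds them in one pass, which is when dc_tc_bind() above runs with the shared dc_drm_device as its data pointer. A rough sketch of what the master side of that handshake typically looks like (the dc_master_* names are hypothetical, not from this patch):

#include <linux/component.h>
#include <linux/device.h>

/* Hypothetical aggregate driver: bind every registered component at once. */
static int dc_master_bind(struct device *dev)
{
	struct dc_drm_device *dc_drm = dev_get_drvdata(dev);

	/* Invokes dc_tc_bind() (and each other sub-block's .bind) with dc_drm. */
	return component_bind_all(dev, dc_drm);
}

static void dc_master_unbind(struct device *dev)
{
	component_unbind_all(dev, dev_get_drvdata(dev));
}

static const struct component_master_ops dc_master_ops = {
	.bind = dc_master_bind,
	.unbind = dc_master_unbind,
};
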
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index f851e9ffdb28..9db1ceaed518 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -901,14 +901,15 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
static struct drm_framebuffer *
ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
if (priv->soc_info->map_noncoherent)
- return drm_gem_fb_create_with_dirty(drm, file, mode_cmd);
+ return drm_gem_fb_create_with_dirty(drm, file, info, mode_cmd);
- return drm_gem_fb_create(drm, file, mode_cmd);
+ return drm_gem_fb_create(drm, file, info, mode_cmd);
}
static struct drm_gem_object *
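
This hunk reflects a DRM core interface change that recurs below (mediatek has the same conversion): the format information is now resolved by the core and handed to .fb_create, instead of each driver looking it up from the cmd itself. A minimal sketch of a driver callback on the new interface (foo_fb_create is a hypothetical name):

#include <linux/err.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

static struct drm_framebuffer *
foo_fb_create(struct drm_device *drm, struct drm_file *file,
	      const struct drm_format_info *info,
	      const struct drm_mode_fb_cmd2 *cmd)
{
	/* 'info' arrives pre-validated from the core; no drm_get_format_info() */
	if (info->num_planes != 1)
		return ERR_PTR(-EINVAL);

	return drm_gem_fb_create(drm, file, info, cmd);
}
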
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index 31b5a3e21911..0e9dba1ef4af 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 5deec673c11e..9722b847a539 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -341,7 +341,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err = lima_sched_task_init(
submit->task, submit->ctx->context + submit->pipe,
- bos, submit->nr_bos, vm);
+ bos, submit->nr_bos, vm, file->client_id);
if (err)
goto err_out1;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 7934098e651b..739e8c6c6d90 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -113,7 +113,8 @@ static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sch
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm)
+ struct lima_vm *vm,
+ u64 drm_client_id)
{
int err, i;
@@ -124,7 +125,8 @@ int lima_sched_task_init(struct lima_sched_task *task,
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->base.base);
- err = drm_sched_job_init(&task->base, &context->base, 1, vm);
+ err = drm_sched_job_init(&task->base, &context->base, 1, vm,
+ drm_client_id);
if (err) {
kfree(task->bos);
return err;
@@ -410,7 +412,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
*/
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/*
@@ -427,7 +429,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
/*
@@ -465,7 +467,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, 0);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void lima_sched_free_job(struct drm_sched_job *job)
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 85b23ba901d5..1a08faf8a529 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -88,7 +88,8 @@ struct lima_sched_pipe {
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm);
+ struct lima_vm *vm,
+ u64 drm_client_id);
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
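
The u64 threaded through lima here follows a drm_sched_job_init() interface change: the scheduler now records the DRM client id at job-init time, which is also why the lima tracepoint hunk just below can drop its private task_id field. A minimal sketch of the new call shape (the foo_* wrapper is hypothetical; the signature matches the calls in this diff):

#include <drm/gpu_scheduler.h>

/*
 * Sketch, assuming a driver job embedding drm_sched_job and an entity to
 * run on. The final u64 is the new parameter: the DRM client id, taken
 * from drm_file::client_id at submit time.
 */
static int foo_job_init(struct drm_sched_job *job,
			struct drm_sched_entity *entity,
			void *owner, u64 drm_client_id)
{
	return drm_sched_job_init(job, entity, 1 /* credits */, owner,
				  drm_client_id);
}
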
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 3a349d10304e..98a7fb2fa00e 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -14,21 +14,19 @@ DECLARE_EVENT_CLASS(lima_task,
TP_PROTO(struct lima_sched_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
- __field(uint64_t, task_id)
__field(unsigned int, context)
__field(unsigned int, seqno)
__string(pipe, task->base.sched->name)
),
TP_fast_assign(
- __entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
__assign_str(pipe);
),
- TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
- __entry->task_id, __entry->context, __entry->seqno,
+ TP_printk("context=%u seqno=%u pipe=%s",
+ __entry->context, __entry->seqno,
__get_str(pipe))
);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 58279ddaab3c..bef6eeb30d3e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2118,7 +2118,8 @@ static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
mutex_unlock(&mtk_dp->update_plugged_status_lock);
}
-static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+mtk_dp_bdg_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
@@ -2725,9 +2726,10 @@ static int mtk_dp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
- if (!mtk_dp)
- return -ENOMEM;
+ mtk_dp = devm_drm_bridge_alloc(dev, struct mtk_dp, bridge,
+ &mtk_dp_bridge_funcs);
+ if (IS_ERR(mtk_dp))
+ return PTR_ERR(mtk_dp);
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
@@ -2785,7 +2787,6 @@ static int mtk_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
- mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
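
All four mediatek probe paths in this patch (and the meson ones further down) move to the same pattern: devm_drm_bridge_alloc() allocates the driver structure with its embedded drm_bridge, wires up the funcs pointer, and ties the lifetime to the device, so the manual "bridge.funcs = ..." assignment disappears. A minimal sketch (the foo_* driver type and names are hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct foo_bridge {
	struct drm_bridge bridge;	/* must be embedded, not a pointer */
	struct device *dev;
};

static const struct drm_bridge_funcs foo_bridge_funcs = {
	/* .attach, .detect, ... */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_bridge *foo;

	/* Allocates foo, sets foo->bridge.funcs, and frees via devres. */
	foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	foo->dev = &pdev->dev;
	platform_set_drvdata(pdev, foo);
	return 0;
}
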
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index a2fdceadf209..61cab32e213a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -1266,9 +1266,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
struct mtk_dpi *dpi;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(dev, struct mtk_dpi, bridge,
+ &mtk_dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
@@ -1320,7 +1321,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpi);
- dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 7c0c12dde488..d5e6bab36414 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -43,14 +43,13 @@ static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
static struct drm_framebuffer *
mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev, cmd);
-
if (info->num_planes != 1)
return ERR_PTR(-EINVAL);
- return drm_gem_fb_create(dev, file, cmd);
+ return drm_gem_fb_create(dev, file, info, cmd);
}
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4fe1f38a3c4b..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1196,9 +1196,10 @@ static int mtk_dsi_probe(struct platform_device *pdev)
int irq_num;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct mtk_dsi, bridge,
+ &mtk_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->driver_data = of_device_get_match_data(dev);
@@ -1246,7 +1247,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dsi);
- dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 8803cd4a8bc9..845fd8aa43c3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1174,7 +1174,8 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+mtk_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1642,9 +1643,10 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge,
+ &mtk_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->dev = dev;
hdmi->conf = of_device_get_match_data(dev);
@@ -1666,7 +1668,6 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register audio driver\n");
- hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index c9678dc68fa1..dc374bfc5951 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -227,9 +227,12 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
- if (!meson_encoder_cvbs)
- return -ENOMEM;
+ meson_encoder_cvbs = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_cvbs,
+ bridge,
+ &meson_encoder_cvbs_bridge_funcs);
+ if (IS_ERR(meson_encoder_cvbs))
+ return PTR_ERR(meson_encoder_cvbs);
/* CVBS Connector Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
@@ -245,7 +248,6 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
"Failed to find CVBS Connector bridge\n");
/* CVBS Encoder Bridge */
- meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3db518e5f95d..6c6624f9ba24 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -106,9 +106,12 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL);
- if (!meson_encoder_dsi)
- return -ENOMEM;
+ meson_encoder_dsi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_dsi,
+ bridge,
+ &meson_encoder_dsi_bridge_funcs);
+ if (IS_ERR(meson_encoder_dsi))
+ return PTR_ERR(meson_encoder_dsi);
/* DSI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0);
@@ -123,7 +126,6 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
"Failed to find DSI transceiver bridge\n");
/* DSI Encoder Bridge */
- meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
meson_encoder_dsi->bridge.of_node = priv->dev->of_node;
meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index ab08d690d882..8205ee56a691 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -376,9 +376,12 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
- if (!meson_encoder_hdmi)
- return -ENOMEM;
+ meson_encoder_hdmi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_hdmi,
+ bridge,
+ &meson_encoder_hdmi_bridge_funcs);
+ if (IS_ERR(meson_encoder_hdmi))
+ return PTR_ERR(meson_encoder_hdmi);
/* HDMI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
@@ -395,7 +398,6 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
}
/* HDMI Encoder Bridge */
- meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
meson_encoder_hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 819a7e9381e3..f4bf40cd7c88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -382,10 +382,10 @@ int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut);
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index c20ed0ab50ec..23debc70dc54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -200,9 +200,9 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 78be964eb97c..f8796e2b7a0f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -201,9 +201,9 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 7a32d3b1d226..e80da12ba1fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -332,9 +332,9 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6067d08aeee3..951d715dea30 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@@ -30,35 +31,37 @@
* This file contains setup code for the CRTC.
*/
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
- const struct drm_format_info *format)
+static void mgag200_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- int i;
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, i8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, g8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b8);
+}
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format)
+{
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- }
+ drm_crtc_fill_gamma_888(crtc, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -67,36 +70,19 @@ void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
-
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].blue >> 8);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].blue >> 8);
- }
+ drm_crtc_load_gamma_888(crtc, lut, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -642,9 +628,9 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
const struct drm_format_info *format = mgag200_crtc_state->format;
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
}
}
@@ -665,9 +651,9 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
funcs->pixpllc_atomic_update(crtc, old_state);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
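
The mgag200 rework above replaces the open-coded palette loops with the shared drm_color_mgmt helpers: the driver supplies one per-entry callback that programs a single LUT slot, and drm_crtc_fill_gamma_*()/drm_crtc_load_gamma_*() handle the iteration, including the RGB565 interpolation previously open-coded here. A sketch of that callback shape, assuming a hypothetical foo_hw_write_palette() for the device access:

#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>

/* Hypothetical register write; stands in for the real palette access. */
static void foo_hw_write_palette(struct drm_crtc *crtc, unsigned int index,
				 u8 r, u8 g, u8 b)
{
	/* device-specific MMIO would go here */
}

/* Programs one palette entry; the helpers call this once per LUT slot. */
static void foo_set_lut_entry(struct drm_crtc *crtc, unsigned int index,
			      u16 red, u16 green, u16 blue)
{
	/* assume an 8-bit DAC: keep the top byte of each 16-bit channel */
	foo_hw_write_palette(crtc, index, red >> 8, green >> 8, blue >> 8);
}

static void foo_load_gamma(struct drm_crtc *crtc, struct drm_color_lut *lut)
{
	if (lut)
		drm_crtc_load_gamma_888(crtc, lut, foo_set_lut_entry);
	else
		drm_crtc_fill_gamma_888(crtc, foo_set_lut_entry);
}
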
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7f127e2ae442..250246f81ea9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -15,21 +15,13 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
- select DRM_CLIENT_SELECTION
- select DRM_DISPLAY_DP_AUX_BUS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_BRIDGE_CONNECTOR
select DRM_EXEC
- select DRM_KMS_HELPER
- select DRM_PANEL
- select DRM_BRIDGE
- select DRM_PANEL_BRIDGE
+ select DRM_GPUVM
select DRM_SCHED
- select FB_SYSMEM_HELPERS if DRM_FBDEV_EMULATION
select SHMEM
select TMPFS
select QCOM_SCM
+ select QCOM_UBWC_CONFIG
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
@@ -66,6 +58,22 @@ config DRM_MSM_VALIDATE_XML
Validate XML files with register definitions against rules-fd schema.
This option mostly targets DRM MSM developers. If unsure, say N.
+config DRM_MSM_KMS
+ def_bool n
+ depends on DRM_MSM
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_PANEL_BRIDGE
+
+config DRM_MSM_KMS_FBDEV
+ def_bool DRM_FBDEV_EMULATION
+ depends on DRM_MSM_KMS
+ select FB_SYSMEM_HELPERS
+
config DRM_MSM_MDSS
bool
depends on DRM_MSM
@@ -74,6 +82,7 @@ config DRM_MSM_MDSS
config DRM_MSM_MDP4
bool "Enable MDP4 support in MSM DRM driver"
depends on DRM_MSM
+ select DRM_MSM_KMS
default y
help
Compile in support for the Mobile Display Processor v4 (MDP4) in
@@ -84,6 +93,7 @@ config DRM_MSM_MDP5
bool "Enable MDP5 support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_MSM_KMS
default y
help
Compile in support for the Mobile Display Processor v5 (MDP5) in
@@ -94,6 +104,7 @@ config DRM_MSM_DPU
bool "Enable DPU support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_MSM_KMS
select DRM_DISPLAY_DSC_HELPER
default y
help
@@ -104,8 +115,11 @@ config DRM_MSM_DPU
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
select DRM_DISPLAY_HDMI_AUDIO_HELPER
select RATIONAL
+ select DRM_DISPLAY_DP_AUX_BUS
+ select DRM_DISPLAY_DP_HELPER
default y
help
Compile in support for DP driver in MSM DRM driver. DP external
@@ -115,6 +129,7 @@ config DRM_MSM_DP
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
select DRM_PANEL
select DRM_MIPI_DSI
select DRM_DISPLAY_DSC_HELPER
@@ -170,6 +185,7 @@ config DRM_MSM_DSI_7NM_PHY
config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
+ depends on DRM_MSM_KMS
default y
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 7a2ada6e2d74..0c0dfb25f01b 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -100,18 +100,15 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
msm-display-$(CONFIG_DRM_MSM_MDSS) += \
msm_mdss.o \
-msm-display-y += \
+msm-display-$(CONFIG_DRM_MSM_KMS) += \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/msm_disp_snapshot.o \
disp/msm_disp_snapshot_util.o \
msm-y += \
- msm_atomic.o \
- msm_atomic_tracepoints.o \
msm_debugfs.o \
msm_drv.o \
- msm_fb.o \
msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
@@ -122,21 +119,24 @@ msm-y += \
msm_gpu_devfreq.o \
msm_io_utils.o \
msm_iommu.o \
- msm_kms.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
msm_submitqueue.o \
+ msm_syncobj.o \
msm_gpu_tracepoints.o \
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm-$(CONFIG_DRM_MSM_KMS) += \
+ msm_atomic.o \
+ msm_atomic_tracepoints.o \
+ msm_fb.o \
+ msm_kms.o \
-msm-display-$(CONFIG_DEBUG_FS) += \
- dp/dp_debug.o
+msm-$(CONFIG_DRM_MSM_KMS_FBDEV) += msm_fbdev.o
msm-display-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
- dp/dp_catalog.o \
dp/dp_ctrl.o \
+ dp/dp_debug.o \
dp/dp_display.o \
dp/dp_drm.o \
dp/dp_link.o \
@@ -159,7 +159,8 @@ msm-display-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
msm-display-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
msm-display-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
-msm-y += $(adreno-y) $(msm-display-y)
+msm-y += $(adreno-y)
+msm-$(CONFIG_DRM_MSM_KMS) += $(msm-display-y)
obj-$(CONFIG_DRM_MSM) += msm.o
@@ -195,6 +196,11 @@ ADRENO_HEADERS = \
generated/a4xx.xml.h \
generated/a5xx.xml.h \
generated/a6xx.xml.h \
+ generated/a6xx_descriptors.xml.h \
+ generated/a6xx_enums.xml.h \
+ generated/a6xx_perfcntrs.xml.h \
+ generated/a7xx_enums.xml.h \
+ generated/a7xx_perfcntrs.xml.h \
generated/a6xx_gmu.xml.h \
generated/adreno_common.xml.h \
generated/adreno_pm4.xml.h \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 379a3d346c30..ec38db45d8a3 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+ a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
@@ -466,19 +466,18 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
-static struct msm_gem_address_space *
-a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
- aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfff * SZ_64K);
+ vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
- if (IS_ERR(aspace) && !IS_ERR(mmu))
+ if (IS_ERR(vm) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
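
This hunk is the template for the rest of the msm changes below: the driver-private msm_gem_address_space becomes a drm_gpuvm created via msm_gem_vm_create(), and every helper that used to take an aspace now takes the VM. A condensed sketch, following only the calls visible in this series:

/*
 * Rename map, per the hunks in this series (not a complete API listing):
 *   msm_gem_address_space_create(mmu, name, start, end)
 *     -> msm_gem_vm_create(drm, mmu, name, start, size, kernel_managed)
 *   msm_gem_address_space_put(aspace)      -> drm_gpuvm_put(vm)
 *   gpu->aspace->mmu                       -> to_msm_vm(gpu->vm)->mmu
 *   msm_gem_kernel_new/put(.., aspace, ..) -> msm_gem_kernel_new/put(.., vm, ..)
 */
static struct drm_gpuvm *foo_create_vm(struct msm_gpu *gpu,
				       struct msm_mmu *mmu)
{
	/* final argument: true = kernel-managed VA allocation */
	struct drm_gpuvm *vm = msm_gem_vm_create(gpu->dev, mmu, "gpu",
						 SZ_16M, 0xfff * SZ_64K, true);

	if (IS_ERR(vm) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return vm;
}
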
static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -504,7 +503,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = a2xx_create_address_space,
+ .create_vm = a2xx_create_vm,
.get_rptr = a2xx_get_rptr,
},
};
@@ -551,14 +550,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->registers = a220_registers;
- if (!gpu->aspace) {
- dev_err(dev->dev, "No memory protection without MMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
return gpu;
fail:
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 4280f71e472a..0407c9bc8c1b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
}
static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
+ WARN_ON(off != 0);
+
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b6df115bb567..a956cd79195e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -526,7 +526,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a3xx_gpu_busy,
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a3xx_get_rptr,
},
};
@@ -581,21 +581,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
- if (!gpu->aspace) {
- /* TODO we think it is possible to configure the GPU to
- * restrict access to VRAM carveout. But the required
- * registers are unknown. For now just bail out and
- * limp along with just modesetting. If it turns out
- * to not be possible to restrict access, then we must
- * implement a cmdstream validator.
- */
- DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index f1b18a6663f7..83f6329accba 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -645,7 +645,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a4xx_gpu_busy,
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
@@ -695,21 +695,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
- if (!gpu->aspace) {
- /* TODO we think it is possible to configure the GPU to
- * restrict access to VRAM carveout. But the required
- * registers are unknown. For now just bail out and
- * limp along with just modesetting. If it turns out
- * to not be possible to restrict access, then we must
- * implement a cmdstream validator.
- */
- DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- if (!allow_vram_carveout) {
- ret = -ENXIO;
- goto fail;
- }
- }
-
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 169b8fe688f8..625a4e787d8f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -116,13 +116,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 60aef0796236..4a04dc43a8e6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -622,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu)
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a5xx_gpu->shadow_bo,
+ gpu->vm, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
if (IS_ERR(a5xx_gpu->shadow))
@@ -835,8 +835,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13);
+ hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13;
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
@@ -1042,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
if (a5xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->shadow_bo);
}
@@ -1457,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -1557,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
return;
}
@@ -1565,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1713,7 +1713,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
@@ -1756,6 +1756,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct qcom_ubwc_cfg_data *common_cfg;
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
@@ -1786,21 +1787,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
- /* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
- adreno_gpu->ubwc_config.highest_bank_bit = 15;
- else
- adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return ERR_CAST(common_cfg);
- /* a5xx only supports UBWC 1.0, these are not configurable */
- adreno_gpu->ubwc_config.macrotile_mode = 0;
- adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ adreno_gpu->_ubwc_config = *common_cfg;
+ adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;
adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 6b91e0bd1514..d6da7351cfbb 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
ptr = msm_gem_kernel_new(drm, bosize,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index b5f9d40687d5..e4924b5e1c48 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -255,7 +255,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -263,9 +263,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
+ MSM_BO_WC, gpu->vm, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
- msm_gem_kernel_put(bo, gpu->aspace);
+ msm_gem_kernel_put(bo, gpu->vm);
return PTR_ERR(counters);
}
@@ -296,8 +296,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++) {
- msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
- msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->vm);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->vm);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 70f7ad806c34..00e1afd46b81 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1335,7 +1335,7 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
REG_A6XX_RB_NC_MODE_CNTL,
REG_A6XX_RB_CMP_DBG_ECO_CNTL,
REG_A7XX_GRAS_NC_MODE_CNTL,
- REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+ REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
REG_A6XX_UCHE_GBIF_GX_CONFIG,
REG_A6XX_UCHE_CLIENT_PF,
REG_A6XX_TPL1_DBG_ECO_CNTL1,
@@ -1442,6 +1442,13 @@ static const struct adreno_info a7xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
},
.preempt_record_size = 4192 * SZ_1K,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 59, 1 },
+ { 7, 2 },
+ { 232, 3 },
+ { 146, 4 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
.family = ADRENO_7XX_GEN3,
@@ -1474,6 +1481,45 @@ static const struct adreno_info a7xx_gpus[] = {
},
},
.preempt_record_size = 3572 * SZ_1K,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x43030c00),
+ .family = ADRENO_7XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen71500_sqe.fw",
+ [ADRENO_FW_GMU] = "gen71500_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a740_hwcg,
+ .protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
+ .gmu_chipid = 0x70f0000,
+ .gmu_cgc_mode = 0x00020222,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
+ },
+ .preempt_record_size = 4192 * SZ_1K,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 294, 1 },
+ { 263, 2 },
+ { 233, 3 },
+ { 141, 4 },
+ ),
}
};
DECLARE_ADRENO_GPULIST(a7xx);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 38c0f8ef85c3..28e6705c6da6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1259,15 +1259,17 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
-
- gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
- msm_gem_address_space_put(gmu->aspace);
+ struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->log.obj, gmu->vm);
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gmu->vm);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
@@ -1296,7 +1298,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
if (IS_ERR(bo->obj))
return PTR_ERR(bo->obj);
- ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
@@ -1311,7 +1313,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
return 0;
}
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
{
struct msm_mmu *mmu;
@@ -1321,9 +1323,9 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
if (IS_ERR(mmu))
return PTR_ERR(mmu);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
- if (IS_ERR(gmu->aspace))
- return PTR_ERR(gmu->aspace);
+ gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
+ if (IS_ERR(gmu->vm))
+ return PTR_ERR(gmu->vm);
return 0;
}
@@ -1940,7 +1942,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- ret = a6xx_gmu_memory_probe(gmu);
+ ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
if (ret)
goto err_put_device;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index b2d4489b4024..d1ce11131ba6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -62,7 +62,7 @@ struct a6xx_gmu {
/* For serializing communication with the GMU: */
struct mutex lock;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
void __iomem *mmio;
void __iomem *rscc;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 491fde0083a2..45dd5fd1c2bf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -111,7 +111,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
+ struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
@@ -120,7 +121,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
if (ctx->seqno == ring->cur_ctx_seqno)
return;
- if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid))
return;
if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
@@ -603,117 +604,118 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]);
}
-static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
+static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- gpu->ubwc_config.rgb565_predicator = 0;
- gpu->ubwc_config.uavflagprd_inv = 0;
- gpu->ubwc_config.min_acc_len = 0;
- gpu->ubwc_config.ubwc_swizzle = 0x6;
- gpu->ubwc_config.macrotile_mode = 0;
- gpu->ubwc_config.highest_bank_bit = 15;
+ const struct qcom_ubwc_cfg_data *common_cfg;
+ struct qcom_ubwc_cfg_data *cfg = &gpu->_ubwc_config;
+
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return PTR_ERR(common_cfg);
+
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ *cfg = *common_cfg;
+
+ cfg->ubwc_swizzle = 0x6;
+ cfg->highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_swizzle = 0x7;
+ cfg->highest_bank_bit = 13;
+ cfg->ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
- gpu->ubwc_config.highest_bank_bit = 14;
+ cfg->highest_bank_bit = 14;
if (adreno_is_a619(gpu))
/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
- gpu->ubwc_config.highest_bank_bit = 13;
+ cfg->highest_bank_bit = 13;
if (adreno_is_a619_holi(gpu))
- gpu->ubwc_config.highest_bank_bit = 13;
+ cfg->highest_bank_bit = 13;
- if (adreno_is_a621(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- }
-
- if (adreno_is_a623(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 16;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- }
+ if (adreno_is_a621(gpu))
+ cfg->highest_bank_bit = 13;
- if (adreno_is_a640_family(gpu))
- gpu->ubwc_config.amsbc = 1;
-
- if (adreno_is_a680(gpu))
- gpu->ubwc_config.macrotile_mode = 1;
+ if (adreno_is_a623(gpu))
+ cfg->highest_bank_bit = 16;
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
adreno_is_a730(gpu) ||
adreno_is_a740_family(gpu)) {
- /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
- gpu->ubwc_config.highest_bank_bit = 16;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
+ /* TODO: get ddr type from bootloader and use 15 for LPDDR4 */
+ cfg->highest_bank_bit = 16;
}
if (adreno_is_a663(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 13;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- gpu->ubwc_config.ubwc_swizzle = 0x4;
+ cfg->highest_bank_bit = 13;
+ cfg->ubwc_swizzle = 0x4;
}
- if (adreno_is_7c3(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
- gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.uavflagprd_inv = 2;
- gpu->ubwc_config.macrotile_mode = 1;
- }
+ if (adreno_is_7c3(gpu))
+ cfg->highest_bank_bit = 14;
- if (adreno_is_a702(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
- gpu->ubwc_config.min_acc_len = 1;
- }
+ if (adreno_is_a702(gpu))
+ cfg->highest_bank_bit = 14;
+
+ if (cfg->highest_bank_bit != common_cfg->highest_bank_bit)
+ DRM_WARN_ONCE("Inconclusive highest_bank_bit value: %u (GPU) vs %u (UBWC_CFG)\n",
+ cfg->highest_bank_bit, common_cfg->highest_bank_bit);
+
+ if (cfg->ubwc_swizzle != common_cfg->ubwc_swizzle)
+ DRM_WARN_ONCE("Inconclusive ubwc_swizzle value: %u (GPU) vs %u (UBWC_CFG)\n",
+ cfg->ubwc_swizzle, common_cfg->ubwc_swizzle);
+
+ gpu->ubwc_config = &gpu->_ubwc_config;
+
+ return 0;
}
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
/*
* We subtract 13 from the highest bank bit (13 is the minimum value
* allowed by hw) and write the lowest two bits of the remaining value
* as hbb_lo and the one above it as hbb_hi to the hardware.
*/
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(cfg->highest_bank_bit < 13);
+ u32 hbb = cfg->highest_bank_bit - 13;
+ bool rgb565_predicator = cfg->ubwc_enc_version >= UBWC_4_0;
+ u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+ bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+ bool amsbc = cfg->ubwc_enc_version >= UBWC_3_0;
+ bool min_acc_len_64b = false;
+ u8 uavflagprd_inv = 0;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
- u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
- u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
+
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+ uavflagprd_inv = 2;
+
+ if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu))
+ min_acc_len_64b = true;
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
level2_swizzling_dis << 12 |
- adreno_gpu->ubwc_config.rgb565_predicator << 11 |
- hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ rgb565_predicator << 11 |
+ hbb_hi << 10 | amsbc << 4 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
level2_swizzling_dis << 6 | hbb_hi << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
level2_swizzling_dis << 12 | hbb_hi << 10 |
- adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
- adreno_gpu->ubwc_config.min_acc_len << 3 |
+ uavflagprd_inv << 4 |
+ min_acc_len_64b << 3 |
hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
@@ -721,10 +723,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
FIELD_PREP(GENMASK(8, 5), hbb_lo));
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
- adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+ min_acc_len_64b << 23 | hbb_lo << 21);
gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
- adreno_gpu->ubwc_config.macrotile_mode);
+ cfg->macrotile_mode);
}
static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
@@ -970,7 +972,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
- msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->sqe_bo);
a6xx_gpu->sqe_bo = NULL;
@@ -987,7 +989,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a6xx_gpu->shadow_bo,
+ gpu->vm, &a6xx_gpu->shadow_bo,
&a6xx_gpu->shadow_iova);
if (IS_ERR(a6xx_gpu->shadow))
@@ -998,7 +1000,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a6xx_gpu->pwrup_reglist_bo,
+ gpu->vm, &a6xx_gpu->pwrup_reglist_bo,
&a6xx_gpu->pwrup_reglist_iova);
if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr))
@@ -2211,12 +2213,12 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
if (a6xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->shadow_bo);
}
@@ -2256,8 +2258,8 @@ static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
mutex_unlock(&a6xx_gpu->gmu.lock);
}
-static struct msm_gem_address_space *
-a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -2271,22 +2273,21 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
!device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- return adreno_iommu_create_address_space(gpu, pdev, quirks);
+ return adreno_iommu_create_vm(gpu, pdev, quirks);
}
-static struct msm_gem_address_space *
-a6xx_create_private_address_space(struct msm_gpu *gpu)
+static struct drm_gpuvm *
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
{
struct msm_mmu *mmu;
- mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+ mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- return msm_gem_address_space_create(mmu,
- "gpu", ADRENO_VM_START,
- adreno_private_address_space_size(gpu));
+ return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
+ adreno_private_vm_size(gpu), kernel_managed);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -2403,8 +2404,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2432,8 +2433,8 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2463,8 +2464,8 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = a6xx_create_address_space,
- .create_private_address_space = a6xx_create_private_address_space,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
@@ -2560,11 +2561,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
- a6xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a6xx_fault_handler);
+
+ ret = a6xx_calc_ubwc_config(adreno_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
- a6xx_calc_ubwc_config(adreno_gpu);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a6xx_preempt_init(gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 9201a53dd341..6e71f617fc3d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -6,6 +6,10 @@
#include "adreno_gpu.h"
+#include "a6xx_enums.xml.h"
+#include "a7xx_enums.xml.h"
+#include "a6xx_perfcntrs.xml.h"
+#include "a7xx_perfcntrs.xml.h"
#include "a6xx.xml.h"
#include "a6xx_gmu.h"
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 341a72a67401..faca2a0243ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -132,7 +132,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -158,7 +158,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
/* Make sure all pending memory writes are posted */
wmb();
- gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
@@ -1619,7 +1619,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a7xx_get_clusters(gpu, a6xx_state, dumper);
a7xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ msm_gem_kernel_put(dumper->bo, gpu->vm);
}
a7xx_get_post_crashdumper_registers(gpu, a6xx_state);
@@ -1631,7 +1631,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_clusters(gpu, a6xx_state, dumper);
a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ msm_gem_kernel_put(dumper->bo, gpu->vm);
}
}
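The hunks above rename the crash-dump script register and retarget the dump buffer at gpu->vm. A hedged sketch of the full crashdumper handshake those pieces belong to — the poll register, condition, and timeout values are our assumptions, not quoted from the driver:

/* Sketch: point the CP at a crashdump script in the GPU VM and run it. */
static int example_crashdump_run(struct msm_gpu *gpu,
				 struct a6xx_crashdumper *dumper)
{
	u32 val;

	/* Make sure the script writes are visible before the CP reads them */
	wmb();

	gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);

	/* Assumed completion poll; interval/timeout are illustrative */
	return gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
				val & 0x02, 100, 10000);
}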
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e545106c70be..95d93ac6812a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -212,7 +212,7 @@ static const struct a6xx_shader_block {
SHADER(A6XX_SP_LB_5_DATA, 0x200),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
- SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_GFX_UAV_BASE_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),
SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 3b17fd2dba89..6a12a35dabff 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -210,7 +210,7 @@ void a6xx_preempt_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0);
/* Enable the GMEM save/restore feature for preemption */
- gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0x1);
/* Reset the preemption state */
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
@@ -344,7 +344,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
PREEMPT_RECORD_SIZE(adreno_gpu),
- MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -362,7 +362,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
PREEMPT_SMMU_INFO_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
- gpu->aspace, &bo, &iova);
+ gpu->vm, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -377,7 +377,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
- msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid);
+ msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid);
smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
smmu_info_ptr->ttbr0 = ttbr;
@@ -405,7 +405,7 @@ void a6xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++)
- msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->vm);
}
void a6xx_preempt_init(struct msm_gpu *gpu)
@@ -431,7 +431,7 @@ void a6xx_preempt_init(struct msm_gpu *gpu)
a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev,
PAGE_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
- gpu->aspace, &a6xx_gpu->preempt_postamble_bo,
+ gpu->vm, &a6xx_gpu->preempt_postamble_bo,
&a6xx_gpu->preempt_postamble_iova);
preempt_prepare_postamble(a6xx_gpu);
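The preemption hunks above all repeat one mechanical change: kernel-internal BOs are allocated into, and released from, the drm_gpuvm instead of the old msm_gem_address_space. A condensed, hedged sketch of that lifecycle (the wrapper names are ours; the calls are the ones used above):

/* Sketch of the allocate/release pairing against gpu->vm. */
static int example_alloc_record(struct msm_gpu *gpu, size_t size,
				struct drm_gem_object **bo, u64 *iova)
{
	void *ptr = msm_gem_kernel_new(gpu->dev, size,
				       MSM_BO_WC | MSM_BO_MAP_PRIV,
				       gpu->vm, bo, iova);

	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

static void example_free_record(struct msm_gpu *gpu, struct drm_gem_object *bo)
{
	msm_gem_kernel_put(bo, gpu->vm);
}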
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 16e7ac444efd..50945bfe9b49 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -16,10 +16,6 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
-bool allow_vram_carveout = false;
-MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
-module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
-
int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);
@@ -264,42 +260,23 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
-static void adreno_device_register_headless(void)
-{
- /* on imx5, we don't have a top-level mdp/dpu node
- * this creates a dummy node for the driver for that case
- */
- struct platform_device_info dummy_info = {
- .parent = NULL,
- .name = "msm",
- .id = -1,
- .res = NULL,
- .num_res = 0,
- .data = NULL,
- .size_data = 0,
- .dma_mask = ~0,
- };
- platform_device_register_full(&dummy_info);
-}
-
static int adreno_probe(struct platform_device *pdev)
{
- int ret;
-
- ret = component_add(&pdev->dev, &a3xx_ops);
- if (ret)
- return ret;
-
- if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
- adreno_device_register_headless();
-
- return 0;
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon") ||
+ msm_gpu_no_components())
+ return msm_gpu_probe(pdev, &a3xx_ops);
+
+ return component_add(&pdev->dev, &a3xx_ops);
}
static void adreno_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &a3xx_ops);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+
+ if (priv->kms_init)
+ component_del(&pdev->dev, &a3xx_ops);
+ else
+ msm_gpu_remove(pdev, &a3xx_ops);
}
static void adreno_shutdown(struct platform_device *pdev)
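The probe/remove changes above fold the old headless special case into a generic "did KMS bind?" decision. A hedged sketch of that discriminator factored out on its own — priv->kms_init as the test is taken from the hunk; the helper itself is ours:

/* Sketch: componentized (KMS) vs. standalone (GPU-only) teardown choice. */
static bool example_is_componentized(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);

	return priv->kms_init;
}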
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 9a327d543f27..e02cabb39f19 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1311,8 +1311,8 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
- { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR,
- REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100},
+ { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+ REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
{ "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 86bff915c3e7..f1230465bf0d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,25 +191,27 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev)
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev)
{
- return adreno_iommu_create_address_space(gpu, pdev, 0);
+ return adreno_iommu_create_vm(gpu, pdev, 0);
}
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev,
- unsigned long quirks)
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (!mmu)
+ return ERR_PTR(-ENODEV);
+ else if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
@@ -224,16 +226,16 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
start = max_t(u64, SZ_16M, geometry->aperture_start);
size = geometry->aperture_end - start + 1;
- aspace = msm_gem_address_space_create(mmu, "gpu",
- start & GENMASK_ULL(48, 0), size);
+ vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
+ size, true);
- if (IS_ERR(aspace) && !IS_ERR(mmu))
+ if (IS_ERR(vm) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
-u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+u64 adreno_private_vm_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
@@ -273,9 +275,11 @@ void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
if (!priv->stall_enabled &&
ktime_after(ktime_get(), priv->stall_reenable_time) &&
!READ_ONCE(gpu->crashstate)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
priv->stall_enabled = true;
- gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true);
+ mmu->funcs->set_stall(mmu, true);
}
spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
}
@@ -290,6 +294,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
u32 scratch[4])
{
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
const char *type = "UNKNOWN";
bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
!READ_ONCE(gpu->crashstate);
@@ -303,8 +308,9 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
if (priv->stall_enabled) {
priv->stall_enabled = false;
- gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false);
+ mmu->funcs->set_stall(mmu, false);
}
+
priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
@@ -351,11 +357,20 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
return 0;
}
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+static bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *drm = gpu->dev;
+ /* Note ctx can be NULL when called from rd_open(): */
+ struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
/* No pointer params yet */
if (*len != 0)
@@ -401,8 +416,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
- if (ctx->aspace)
- *value = gpu->global_faults + ctx->aspace->faults;
+ if (vm)
+ *value = gpu->global_faults + to_msm_vm(vm)->faults;
else
*value = gpu->global_faults;
return 0;
@@ -410,36 +425,39 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = gpu->suspend_count;
return 0;
case MSM_PARAM_VA_START:
- if (ctx->aspace == gpu->aspace)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->aspace->va_start;
+ *value = vm->mm_start;
return 0;
case MSM_PARAM_VA_SIZE:
- if (ctx->aspace == gpu->aspace)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->aspace->va_size;
+ *value = vm->mm_range;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
- *value = adreno_gpu->ubwc_config.highest_bank_bit;
+ *value = adreno_gpu->ubwc_config->highest_bank_bit;
return 0;
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
case MSM_PARAM_UBWC_SWIZZLE:
- *value = adreno_gpu->ubwc_config.ubwc_swizzle;
+ *value = adreno_gpu->ubwc_config->ubwc_swizzle;
return 0;
case MSM_PARAM_MACROTILE_MODE:
- *value = adreno_gpu->ubwc_config.macrotile_mode;
+ *value = adreno_gpu->ubwc_config->macrotile_mode;
return 0;
case MSM_PARAM_UCHE_TRAP_BASE:
*value = adreno_gpu->uche_trap_base;
return 0;
+ case MSM_PARAM_HAS_PRR:
+ *value = adreno_smmu_has_prr(gpu);
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
struct drm_device *drm = gpu->dev;
@@ -485,7 +503,22 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return UERR(EPERM, drm, "invalid permissions");
- return msm_file_private_set_sysprof(ctx, gpu, value);
+ return msm_context_set_sysprof(ctx, gpu, value);
+ case MSM_PARAM_EN_VM_BIND:
+ /* We can only support VM_BIND with per-process pgtables: */
+ if (ctx->vm == gpu->vm)
+ return UERR(EINVAL, drm, "requires per-process pgtables");
+
+ /*
+ * We can only switch to VM_BIND mode if the VM has not yet
+ * been created:
+ */
+ if (ctx->vm)
+ return UERR(EBUSY, drm, "VM already created");
+
+ ctx->userspace_managed_vm = value;
+
+ return 0;
default:
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
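MSM_PARAM_EN_VM_BIND is opt-in from userspace and must happen before the context's VM is instantiated. A hedged sketch of how a Mesa-like consumer might probe PRR support and enable VM_BIND — the param names come from this series, the UAPI struct and libdrm calls are the existing msm conventions, and error handling is trimmed:

/* Userspace sketch: query MSM_PARAM_HAS_PRR, then opt in to VM_BIND. */
#include <xf86drm.h>
#include "msm_drm.h"

static int example_enable_vm_bind(int fd)
{
	struct drm_msm_param req = {
		.pipe = MSM_PIPE_3D0,
		.param = MSM_PARAM_HAS_PRR,
	};

	if (drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req)))
		return -1;
	/* req.value now reports PRR support; not required for VM_BIND */

	req.param = MSM_PARAM_EN_VM_BIND;
	req.value = 1;
	return drmCommandWrite(fd, DRM_MSM_SET_PARAM, &req, sizeof(req));
}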
@@ -607,7 +640,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
void *ptr;
ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
@@ -800,6 +833,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
+ kfree(state->vm_logs);
kfree(state->bos);
kfree(state->comm);
kfree(state->cmd);
@@ -940,6 +974,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
+ if (state->vm_logs) {
+ drm_puts(p, "vm-log:\n");
+ for (i = 0; i < state->nr_vm_logs; i++) {
+ struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
+ drm_printf(p, " - %s:%d: 0x%016llx-0x%016llx\n",
+ e->op, e->queue_id, e->iova,
+ e->iova + e->range);
+ }
+ }
+
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
drm_puts(p, "ringbuffer:\n");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index bc063594a359..9dc93c247196 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -12,13 +12,14 @@
#include <linux/firmware.h>
#include <linux/iopoll.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_gpu.h"
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
-extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -205,44 +206,12 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *fw[ADRENO_FW_MAX];
- struct {
- /**
- * @rgb565_predicator: Unknown, introduced with A650 family,
- * related to UBWC mode/ver 4
- */
- u32 rgb565_predicator;
- /** @uavflagprd_inv: Unknown, introduced with A650 family */
- u32 uavflagprd_inv;
- /** @min_acc_len: Whether the minimum access length is 64 bits */
- u32 min_acc_len;
- /**
- * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
- *
- * UBWC 1.0 always enables all three levels.
- * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
- * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
- *
- * This is a bitmask where BIT(0) enables level 1, BIT(1)
- * controls level 2, and BIT(2) enables level 3.
- */
- u32 ubwc_swizzle;
- /**
- * @highest_bank_bit: Highest Bank Bit
- *
- * The Highest Bank Bit value represents the bit of the highest
- * DDR bank. This should ideally use DRAM type detection.
- */
- u32 highest_bank_bit;
- u32 amsbc;
- /**
- * @macrotile_mode: Macrotile Mode
- *
- * Whether to use 4-channel macrotiling mode or the newer
- * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
- * 4-channel and 1 is 8-channel.
- */
- u32 macrotile_mode;
- } ubwc_config;
+ /*
+ * The migration to the central UBWC config db is still in flight - keep
+ * a copy containing some local fixups until that's done.
+ */
+ const struct qcom_ubwc_cfg_data *ubwc_config;
+ struct qcom_ubwc_cfg_data _ubwc_config;
/*
* Register offsets are different between some GPUs.
@@ -580,10 +549,10 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
#define ADRENO_VM_START 0x100000000ULL
-u64 adreno_private_address_space_size(struct msm_gpu *gpu);
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+u64 adreno_private_vm_size(struct msm_gpu *gpu);
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -623,14 +592,14 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len,
* Common helper function to initialize the default address space for arm-smmu
* attached targets
*/
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev);
-
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev,
- unsigned long quirks);
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks);
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
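The header change above replaces the inline ubwc_config struct with a pointer to the shared qcom_ubwc_cfg_data, plus a local copy for fixups. A hedged sketch of the "central db + local fixups" pattern the new comment describes — qcom_ubwc_config_get_data() is believed to be the lookup in <linux/soc/qcom/ubwc.h>, and whether a given GPU needs the fixup step is an assumption:

/* Sketch: fetch the shared UBWC config, override locally if needed. */
static int example_init_ubwc(struct adreno_gpu *adreno_gpu)
{
	const struct qcom_ubwc_cfg_data *cfg = qcom_ubwc_config_get_data();

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* Keep a mutable copy so GPU-specific overrides can be applied: */
	adreno_gpu->_ubwc_config = *cfg;
	adreno_gpu->_ubwc_config.highest_bank_bit = 15; /* example fixup only */
	adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;

	return 0;
}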
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index ffc4d4257ae5..56d3c38c8778 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8650_dpu_caps = {
static const struct dpu_mdp_cfg sm8650_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -31,32 +30,26 @@ static const struct dpu_ctl_cfg sm8650_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x1000,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +132,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +140,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +148,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +156,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +164,14 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +182,18 @@ static const struct dpu_dspp_cfg sm8650_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -213,67 +202,57 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
.name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
},
@@ -307,32 +286,30 @@ static const struct dpu_dsc_cfg sm8650_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x6,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -341,7 +318,7 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -374,7 +351,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -383,7 +359,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -393,7 +368,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -403,7 +377,6 @@ static const struct dpu_intf_cfg sm8650_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
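The sm8650 catalog hunks above strip per-block feature flags (CTL_SM8550_MASK, DPU_PINGPONG_DITHER, INTF_SC7280_MASK, and so on) that can instead be derived from the DPU core revision already recorded in the catalog. A hedged sketch of the version-gated check that replaces such a flag — the >= 8 cutoff is illustrative, not lifted from the driver:

/* Sketch: key a capability off mdss_ver instead of a per-SoC flag. */
static bool example_periph_0_removed(const struct dpu_mdss_cfg *cat)
{
	return cat->mdss_ver->core_major_ver >= 8;
}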
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h
new file mode 100644
index 000000000000..db8cc2d0112c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Linaro Limited
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_12_0_SM8750_H
+#define _DPU_12_0_SM8750_H
+
+static const struct dpu_caps sm8750_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8750_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm8750_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8750_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sm8750_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ }, {
+ .name = "lm_6", .id = LM_6,
+ .base = 0x4a000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_7,
+ .pingpong = PINGPONG_6,
+ }, {
+ .name = "lm_7", .id = LM_7,
+ .base = 0x4b000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_6,
+ .pingpong = PINGPONG_7,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8750_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8750_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x6f000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 20),
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x70000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 21),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
+ .base = 0x7e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_5,
+ }, {
+ .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
+ .base = 0x7e400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_5,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8750_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x51000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_4", .id = MERGE_3D_4,
+ .base = 0x66700, .len = 0x1c,
+ }, {
+ .name = "merge_3d_5", .id = MERGE_3D_5,
+ .base = 0x7e700, .len = 0x1c,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard-slice
+ * DSC encoders, so both share the same base address but have their own
+ * sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8750_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_3_0", .id = DSC_6,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_3_1", .id = DSC_7,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8750_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_cwb_cfg sm8750_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x20,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x20,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7e200, .len = 0x20,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7e600, .len = 0x20,
+ },
+};
+
+static const struct dpu_intf_cfg sm8750_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x4bc,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x4bc,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x4bc,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x4bc,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm8750_perf_data = {
+ .max_bw_low = 18900000,
+ .max_bw_high = 28500000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
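The two inefficiency factors above are percentages applied to raw clock and bandwidth demand (120 means "pad by 20%"). A hedged sketch of the scaling, paraphrasing rather than quoting the dpu_core_perf call site:

/* Sketch: pad a raw bandwidth figure by the catalog's inefficiency factor. */
static u64 example_padded_bw(u64 raw_bw_kbps, const struct dpu_perf_cfg *perf)
{
	return mult_frac(raw_bw_kbps, perf->bw_inefficiency_factor, 100);
}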
+
+static const struct dpu_mdss_version sm8750_mdss_ver = {
+ .core_major_ver = 12,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8750_cfg = {
+ .mdss_ver = &sm8750_mdss_ver,
+ .caps = &sm8750_dpu_caps,
+ .mdp = &sm8750_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sm8750_ctl),
+ .ctl = sm8750_ctl,
+ .sspp_count = ARRAY_SIZE(sm8750_sspp),
+ .sspp = sm8750_sspp,
+ .mixer_count = ARRAY_SIZE(sm8750_lm),
+ .mixer = sm8750_lm,
+ .dspp_count = ARRAY_SIZE(sm8750_dspp),
+ .dspp = sm8750_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8750_pp),
+ .pingpong = sm8750_pp,
+ .dsc_count = ARRAY_SIZE(sm8750_dsc),
+ .dsc = sm8750_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8750_merge_3d),
+ .merge_3d = sm8750_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8750_wb),
+ .wb = sm8750_wb,
+ .cwb_count = ARRAY_SIZE(sm8750_cwb),
+ .cwb = sm8750_cwb,
+ .intf_count = ARRAY_SIZE(sm8750_intf),
+ .intf = sm8750_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &sm8750_perf_data,
+};
+
+#endif
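This header only defines the sm8750 tables; the wiring happens elsewhere in the DPU driver. A hedged sketch of what that usually looks like — the compatible string and the match-table placement in dpu_kms.c follow the usual pattern for these catalogs and are assumptions here:

/* Sketch: declaration (dpu_hw_catalog.h) plus the of_match entry (dpu_kms.c). */
extern const struct dpu_mdss_cfg dpu_sm8750_cfg;

static const struct of_device_id example_dpu_dt_match[] = {
	{ .compatible = "qcom,sm8750-dpu", .data = &dpu_sm8750_cfg },
	{ /* sentinel */ }
};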
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index 39027a21c6fe..29e0eba91930 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8937_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -116,7 +115,6 @@ static const struct dpu_dspp_cfg msm8937_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 8d1b43ea1663..cb1ee4b63f9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8917_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -103,7 +102,6 @@ static const struct dpu_dspp_cfg msm8917_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 16c12499b24b..b44d02b48418 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -19,7 +19,6 @@ static const struct dpu_mdp_cfg msm8953_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
@@ -116,7 +115,6 @@ static const struct dpu_dspp_cfg msm8953_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 91f514d28ac6..8af63db315b4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -22,7 +22,6 @@ static const struct dpu_mdp_cfg msm8996_mdp[] = {
{
.name = "top_0",
.base = 0x0, .len = 0x454,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -181,28 +180,24 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -223,12 +218,10 @@ static const struct dpu_dspp_cfg msm8996_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 413cd59dc0c4..f91220496082 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -23,7 +23,6 @@ static const struct dpu_caps msm8998_dpu_caps = {
static const struct dpu_mdp_cfg msm8998_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -170,28 +169,24 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -212,12 +207,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index b2eb7ca699e3..8f9a097147c0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sdm660_dpu_caps = {
static const struct dpu_mdp_cfg sdm660_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -141,28 +140,24 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
@@ -183,12 +178,10 @@ static const struct dpu_dspp_cfg sdm660_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index 85e121ad84a0..0ad18bd273ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sdm630_dpu_caps = {
static const struct dpu_mdp_cfg sdm630_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x458,
- .features = BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -115,14 +114,12 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
@@ -133,7 +130,6 @@ static const struct dpu_dspp_cfg sdm630_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &msm8998_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 49363d7d5b93..5cc9f55d542b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sdm845_dpu_caps = {
static const struct dpu_mdp_cfg sdm845_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT) | BIT(DPU_MDP_VSYNC_SEL),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -134,7 +133,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -142,7 +141,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -150,7 +149,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
@@ -158,7 +157,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -170,22 +169,18 @@ static const struct dpu_dspp_cfg sdm845_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -194,28 +189,24 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index c2fde980fb52..0f5e9babdeea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -11,7 +11,6 @@
static const struct dpu_mdp_cfg sdm670_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -69,7 +68,7 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -77,7 +76,7 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -85,14 +84,14 @@ static const struct dpu_lm_cfg sdm670_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -103,12 +102,10 @@ static const struct dpu_dspp_cfg sdm670_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 08d38e1d420c..ae1b2ed96e9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_mdp_cfg sm8150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -41,32 +40,26 @@ static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -143,7 +136,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -151,7 +144,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -159,7 +152,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -167,7 +160,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -175,14 +168,14 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -193,22 +186,18 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -217,42 +206,36 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -276,19 +259,15 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -296,7 +275,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -311,7 +290,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -320,7 +298,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -330,7 +307,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -340,7 +316,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index d6f8b1030c68..b572cfa7ed35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sc8180x_dpu_caps = {
static const struct dpu_mdp_cfg sc8180x_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -41,32 +40,26 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -143,7 +136,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -151,7 +144,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -159,7 +152,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -167,7 +160,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -175,14 +168,14 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -193,22 +186,18 @@ static const struct dpu_dspp_cfg sc8180x_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -217,42 +206,36 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -276,27 +259,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_4", .id = DSC_4,
.base = 0x81000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_5", .id = DSC_5,
.base = 0x81400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -304,7 +281,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -319,7 +296,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -328,7 +304,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +313,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -350,7 +324,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = 999,
.prog_fetch_lines_worst_case = 24,
@@ -359,7 +332,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x6c000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -368,7 +340,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x6c800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -383,6 +354,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
+ .min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 71ba48b05656..a56c288ac10c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -23,7 +23,6 @@ static const struct dpu_caps sm7150_dpu_caps = {
static const struct dpu_mdp_cfg sm7150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -38,32 +37,26 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -116,7 +109,7 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -124,7 +117,7 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -132,14 +125,14 @@ static const struct dpu_lm_cfg sm7150_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -150,12 +143,10 @@ static const struct dpu_dspp_cfg sm7150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -164,28 +155,24 @@ static const struct dpu_pingpong_cfg sm7150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
@@ -206,11 +193,9 @@ static const struct dpu_dsc_cfg sm7150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -218,7 +203,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -227,7 +211,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -237,7 +220,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -247,7 +229,6 @@ static const struct dpu_intf_cfg sm7150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -260,7 +241,7 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index da11830d4407..26883f6b66b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -20,7 +20,6 @@ static const struct dpu_caps sm6150_dpu_caps = {
static const struct dpu_mdp_cfg sm6150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -35,32 +34,26 @@ static const struct dpu_ctl_cfg sm6150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -113,20 +106,17 @@ static const struct dpu_lm_cfg sm6150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_2,
},
@@ -136,7 +126,6 @@ static const struct dpu_dspp_cfg sm6150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -145,19 +134,16 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
},
@@ -167,7 +153,7 @@ static const struct dpu_wb_cfg sm6150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -182,7 +168,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -191,7 +176,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -201,7 +185,6 @@ static const struct dpu_intf_cfg sm6150_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index fcfb3774f7a1..fbf50f279e66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -22,7 +22,6 @@ static const struct dpu_caps sm6125_dpu_caps = {
static const struct dpu_mdp_cfg sm6125_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
- .features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
@@ -35,32 +34,26 @@ static const struct dpu_ctl_cfg sm6125_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -97,7 +90,6 @@ static const struct dpu_lm_cfg sm6125_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -105,7 +97,6 @@ static const struct dpu_lm_cfg sm6125_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
.dspp = 0,
@@ -117,7 +108,6 @@ static const struct dpu_dspp_cfg sm6125_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -126,14 +116,12 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -144,7 +132,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -159,7 +147,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -168,7 +155,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = 0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index a86fdb33ebdd..7b8b7a1c2d76 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -39,32 +39,26 @@ static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +135,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +143,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +151,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +159,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +167,14 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +185,18 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,42 +205,36 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -274,19 +258,15 @@ static const struct dpu_dsc_cfg sm8250_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_2", .id = DSC_2,
.base = 0x80800, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_3", .id = DSC_3,
.base = 0x80c00, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -294,7 +274,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -303,7 +282,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -313,7 +291,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -323,7 +300,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -336,7 +312,7 @@ static const struct dpu_wb_cfg sm8250_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 842fcc5887fe..c990ba3b5db0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -32,17 +32,14 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
};
@@ -87,7 +84,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -95,7 +92,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -106,7 +103,6 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -115,14 +111,12 @@ static const struct dpu_pingpong_cfg sc7180_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -133,7 +127,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -142,7 +135,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -156,7 +148,7 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index c5fd89dd7c89..343ff5482382 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -29,7 +29,6 @@ static const struct dpu_ctl_cfg sm6115_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -58,7 +57,6 @@ static const struct dpu_lm_cfg sm6115_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -69,7 +67,6 @@ static const struct dpu_dspp_cfg sm6115_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -78,7 +75,6 @@ static const struct dpu_pingpong_cfg sm6115_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -89,7 +85,6 @@ static const struct dpu_intf_cfg sm6115_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index a234bb289d24..093d16bdc450 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -35,22 +35,18 @@ static const struct dpu_ctl_cfg sm6350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -95,7 +91,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -103,7 +99,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -115,7 +111,6 @@ static const struct dpu_dspp_cfg sm6350_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -124,14 +119,12 @@ static struct dpu_pingpong_cfg sm6350_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
@@ -142,7 +135,6 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -150,7 +142,7 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -165,7 +157,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
- .features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 35,
@@ -174,7 +165,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 35,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 53f3be28f6f6..47053bf9b0a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -29,7 +29,6 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -58,7 +57,6 @@ static const struct dpu_lm_cfg qcm2290_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
@@ -69,7 +67,6 @@ static const struct dpu_dspp_cfg qcm2290_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -78,7 +75,6 @@ static const struct dpu_pingpong_cfg qcm2290_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -89,7 +85,6 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index 3a3bc8e429be..98190ee7ec7a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -30,7 +30,6 @@ static const struct dpu_ctl_cfg sm6375_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
@@ -59,7 +58,6 @@ static const struct dpu_lm_cfg sm6375_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_QCM2290_MASK,
.sblk = &qcm2290_lm_sblk,
.lm_pair = 0,
.pingpong = PINGPONG_0,
@@ -71,7 +69,6 @@ static const struct dpu_dspp_cfg sm6375_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -80,7 +77,6 @@ static const struct dpu_pingpong_cfg sm6375_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -91,7 +87,6 @@ static const struct dpu_dsc_cfg sm6375_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
- .features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
@@ -99,7 +94,6 @@ static const struct dpu_intf_cfg sm6375_intf[] = {
{
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
- .features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 90e86063a372..85aae40c210f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -39,32 +39,26 @@ static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +135,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +143,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +151,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +159,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +167,14 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +185,18 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,42 +205,36 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -279,22 +263,20 @@ static const struct dpu_dsc_cfg sm8350_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -303,7 +285,7 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -318,7 +300,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -327,7 +308,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -337,7 +317,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -347,7 +326,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index e9625c48c567..8f978b9c3452 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -32,22 +32,18 @@ static const struct dpu_ctl_cfg sc7280_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -92,21 +88,21 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sc7180_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -117,7 +113,6 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -126,28 +121,24 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
@@ -159,7 +150,7 @@ static const struct dpu_dsc_cfg sc7280_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
},
};
@@ -168,7 +159,7 @@ static const struct dpu_wb_cfg sc7280_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -183,7 +174,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -192,7 +182,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -202,7 +191,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 139f11321fea..303d33dc7783 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sc8280xp_dpu_caps = {
static const struct dpu_mdp_cfg sc8280xp_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -39,32 +38,26 @@ static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +134,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +142,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +150,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +158,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +166,14 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +184,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,42 +204,36 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
@@ -279,32 +262,28 @@ static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -314,7 +293,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -323,7 +301,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -333,7 +310,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -343,7 +319,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -352,7 +327,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -361,7 +335,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_3,
.prog_fetch_lines_worst_case = 24,
@@ -370,7 +343,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3a000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -379,7 +351,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -388,7 +359,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 461294143a90..b09a6af4c474 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8450_dpu_caps = {
static const struct dpu_mdp_cfg sm8450_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -40,32 +39,26 @@ static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -142,7 +135,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -150,7 +143,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -158,7 +151,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -166,7 +159,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -174,14 +167,14 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -192,22 +185,18 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -216,55 +205,47 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -295,22 +276,20 @@ static const struct dpu_dsc_cfg sm8450_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -319,7 +298,7 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -334,7 +313,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -343,7 +321,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -353,7 +330,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -363,7 +339,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index c248b3b55c41..0f7b4a224e4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -20,7 +20,6 @@ static const struct dpu_caps sa8775p_dpu_caps = {
static const struct dpu_mdp_cfg sa8775p_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
@@ -39,32 +38,26 @@ static const struct dpu_ctl_cfg sa8775p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -141,7 +134,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -149,7 +142,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -157,7 +150,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -165,7 +158,7 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -173,14 +166,14 @@ static const struct dpu_lm_cfg sa8775p_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x400,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -191,22 +184,18 @@ static const struct dpu_dspp_cfg sa8775p_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -215,55 +204,47 @@ static const struct dpu_pingpong_cfg sa8775p_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -294,32 +275,28 @@ static const struct dpu_dsc_cfg sa8775p_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_2_0", .id = DSC_4,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_2_1", .id = DSC_5,
.base = 0x82000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
},
};
@@ -328,7 +305,7 @@ static const struct dpu_wb_cfg sa8775p_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
@@ -344,7 +321,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -353,7 +329,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -363,7 +338,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -373,7 +347,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -382,7 +355,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -391,7 +363,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -400,7 +371,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -409,7 +379,6 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 59c7fdf28e89..465b6460f875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sm8550_dpu_caps = {
static const struct dpu_mdp_cfg sm8550_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -31,32 +30,26 @@ static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +132,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +140,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +148,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +156,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +164,14 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +182,18 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -212,55 +201,47 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -291,22 +272,20 @@ static const struct dpu_dsc_cfg sm8550_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +294,7 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -329,7 +308,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +316,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -348,7 +325,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -358,7 +334,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
index 5667d055fbd1..6caa7d40f368 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -21,7 +21,6 @@ static const struct dpu_caps sar2130p_dpu_caps = {
static const struct dpu_mdp_cfg sar2130p_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -31,32 +30,26 @@ static const struct dpu_ctl_cfg sar2130p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -139,7 +132,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -147,7 +140,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -155,7 +148,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -163,7 +156,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -171,14 +164,14 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -189,22 +182,18 @@ static const struct dpu_dspp_cfg sar2130p_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -212,55 +201,47 @@ static const struct dpu_pingpong_cfg sar2130p_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -291,22 +272,20 @@ static const struct dpu_dsc_cfg sar2130p_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +294,7 @@ static const struct dpu_wb_cfg sar2130p_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -329,7 +308,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -338,7 +316,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -348,7 +325,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -358,7 +334,6 @@ static const struct dpu_intf_cfg sar2130p_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 52cc10aec1f9..7243eebb85f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -20,7 +20,6 @@ static const struct dpu_caps x1e80100_dpu_caps = {
static const struct dpu_mdp_cfg x1e80100_mdp = {
.name = "top_0",
.base = 0, .len = 0x494,
- .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
@@ -30,32 +29,26 @@ static const struct dpu_ctl_cfg x1e80100_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -138,7 +131,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
@@ -146,7 +139,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
@@ -154,7 +147,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
@@ -162,7 +155,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
@@ -170,14 +163,14 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
- .features = MIXER_SDM845_MASK,
+ .features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
.pingpong = PINGPONG_5,
@@ -188,22 +181,18 @@ static const struct dpu_dspp_cfg x1e80100_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_2", .id = DSPP_2,
.base = 0x58000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_3", .id = DSPP_3,
.base = 0x5a000, .len = 0x1800,
- .features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
@@ -212,55 +201,47 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x69000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
- .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
},
@@ -291,22 +272,20 @@ static const struct dpu_dsc_cfg x1e80100_dsc[] = {
{
.name = "dce_0_0", .id = DSC_0,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_0_1", .id = DSC_1,
.base = 0x80000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2),
.sblk = &dsc_sblk_1,
}, {
.name = "dce_1_0", .id = DSC_2,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_0,
}, {
.name = "dce_1_1", .id = DSC_3,
.base = 0x81000, .len = 0x4,
- .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
.sblk = &dsc_sblk_1,
},
};
@@ -315,7 +294,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SM8250_MASK,
+ .features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb_yuv,
.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
@@ -330,7 +309,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x34000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -339,7 +317,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
@@ -349,7 +326,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x36000, .len = 0x300,
- .features = INTF_SC7280_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -359,7 +335,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -368,7 +343,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
@@ -377,7 +351,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_3,
.prog_fetch_lines_worst_case = 24,
@@ -386,7 +359,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
@@ -395,7 +367,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
.prog_fetch_lines_worst_case = 24,
@@ -404,7 +375,6 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
- .features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index a4b0fe0d9899..d4b545448d74 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -320,14 +320,22 @@ static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate, const struct msm_format *format)
+ struct dpu_plane_state *pstate,
+ const struct msm_format *format,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
- uint32_t blend_op;
- uint32_t fg_alpha, bg_alpha;
+ u32 blend_op;
+ u32 fg_alpha, bg_alpha, max_alpha;
- fg_alpha = pstate->base.alpha >> 8;
- bg_alpha = 0xff - fg_alpha;
+ if (mdss_ver->core_major_ver < 12) {
+ max_alpha = 0xff;
+ fg_alpha = pstate->base.alpha >> 8;
+ } else {
+ max_alpha = 0x3ff;
+ fg_alpha = pstate->base.alpha >> 6;
+ }
+ bg_alpha = max_alpha - fg_alpha;
/* default to opaque blending */
if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
@@ -337,7 +345,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
- if (fg_alpha != 0xff) {
+ if (fg_alpha != max_alpha) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
@@ -348,7 +356,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
/* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
- if (fg_alpha != 0xff) {
+ if (fg_alpha != max_alpha) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_FG_MOD_ALPHA |
DPU_BLEND_FG_INV_MOD_ALPHA |
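The hunk above widens the constant-alpha handling: DPU cores before major version 12 take an 8-bit alpha (full opacity 0xff), newer cores a 10-bit one (0x3ff), while the DRM plane alpha property is 16-bit either way. A minimal sketch of that scaling, under those assumptions; the helper name is invented for illustration:

#include <linux/types.h>

/* Illustrative helper (not in the patch): scale DRM's 16-bit plane
 * alpha to the mixer's constant-alpha width for a given DPU core.
 */
static u32 sketch_drm_alpha_to_hw(u16 drm_alpha, u32 core_major_ver)
{
	if (core_major_ver < 12)
		return drm_alpha >> 8;	/* 0xffff -> 0xff  (8-bit alpha) */

	return drm_alpha >> 6;		/* 0xffff -> 0x3ff (10-bit alpha) */
}

Background alpha then falls out as max_alpha - fg_alpha, exactly as the patch computes it.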
@@ -402,7 +410,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
struct dpu_hw_stage_cfg *stage_cfg
)
{
- uint32_t lm_idx;
+ u32 lm_idx;
enum dpu_sspp sspp_idx;
struct drm_plane_state *state;
@@ -442,12 +450,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
const struct msm_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
-
- uint32_t lm_idx;
+ u32 lm_idx;
bool bg_alpha_enable = false;
DECLARE_BITMAP(active_fetch, SSPP_MAX);
+ DECLARE_BITMAP(active_pipes, SSPP_MAX);
memset(active_fetch, 0, sizeof(active_fetch));
+ memset(active_pipes, 0, sizeof(active_pipes));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -465,6 +474,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
bg_alpha_enable = true;
set_bit(pstate->pipe.sspp->idx, active_fetch);
+ set_bit(pstate->pipe.sspp->idx, active_pipes);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -473,6 +483,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->r_pipe.sspp) {
set_bit(pstate->r_pipe.sspp->idx, active_fetch);
+ set_bit(pstate->r_pipe.sspp->idx, active_pipes);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -482,7 +493,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
/* blend config update */
for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
- _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format,
+ ctl->mdss_ver);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@@ -495,6 +507,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (ctl->ops.set_active_fetch_pipes)
ctl->ops.set_active_fetch_pipes(ctl, active_fetch);
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, active_pipes);
+
_dpu_crtc_program_lm_output_roi(crtc);
}
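_dpu_crtc_blend_setup_mixer() now tracks two per-SSPP bitmaps, active_fetch and active_pipes; the bookkeeping is the stock kernel bitmap API. A short sketch of that pattern (SSPP_VIG0 stands in for whichever pipe the plane state actually points at):

#include <linux/bitmap.h>

/* Sketch of the per-SSPP bookkeeping used above. */
static void sketch_track_pipe(void)
{
	DECLARE_BITMAP(active_pipes, SSPP_MAX);	/* one bit per source pipe */

	bitmap_zero(active_pipes, SSPP_MAX);	/* same effect as the memset() above */
	set_bit(SSPP_VIG0, active_pipes);	/* this pipe fetches in the frame */

	if (test_bit(SSPP_VIG0, active_pipes))
		;	/* pipe participates in the blend */
}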
@@ -510,6 +525,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
struct dpu_hw_stage_cfg stage_cfg;
+ DECLARE_BITMAP(active_lms, LM_MAX);
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
@@ -521,10 +537,16 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].lm_ctl);
if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
+ if (mixer[i].lm_ctl->ops.set_active_pipes)
+ mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL);
+
+ if (mixer[i].hw_lm->ops.clear_all_blendstages)
+ mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm);
}
/* initialize stage cfg */
memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(active_lms, 0, sizeof(active_lms));
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
@@ -538,13 +560,22 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
ctl->ops.update_pending_flush_mixer(ctl,
mixer[i].hw_lm->idx);
+ set_bit(lm->idx, active_lms);
+ if (ctl->ops.set_active_lms)
+ ctl->ops.set_active_lms(ctl, active_lms);
+
DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0);
- ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &stage_cfg);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &stage_cfg);
+
+ if (lm->ops.setup_blendstage)
+ lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
+ &stage_cfg);
}
}
@@ -711,7 +742,7 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
+ kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
}
/**
@@ -880,7 +911,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
dev = crtc->dev;
priv = dev->dev_private;
- if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) {
DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c0ed110a7d30..05e5f3463e30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -264,7 +264,7 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
mode = &phys_enc->cached_mode;
return phys_enc->hw_intf->cap->type == INTF_DP &&
- msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
+ msm_dp_needs_periph_flush(priv->kms->dp[disp_info->h_tile_instance[0]], mode);
}
/**
@@ -283,9 +283,9 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
index = disp_info->h_tile_instance[0];
if (disp_info->intf_type == INTF_DP)
- return msm_dp_wide_bus_available(priv->dp[index]);
+ return msm_dp_wide_bus_available(priv->kms->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
- return msm_dsi_wide_bus_enabled(priv->dsi[index]);
+ return msm_dsi_wide_bus_enabled(priv->kms->dsi[index]);
return false;
}
@@ -647,7 +647,7 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
int index = dpu_enc->disp_info.h_tile_instance[0];
if (dpu_enc->disp_info.intf_type == INTF_DSI)
- return msm_dsi_get_dsc_config(priv->dsi[index]);
+ return msm_dsi_get_dsc_config(priv->kms->dsi[index]);
return NULL;
}
@@ -709,7 +709,8 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
- if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
+ if (msm_dp_is_yuv_420_enabled(priv->kms->dp[disp_info->h_tile_instance[0]],
+ adj_mode))
topology->num_cdm++;
}
}
@@ -980,7 +981,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
- queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
@@ -2195,8 +2196,17 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ if (hw_mixer[i]->ops.clear_all_blendstages)
+ hw_mixer[i]->ops.clear_all_blendstages(hw_mixer[i]);
+
+ if (ctl->ops.set_active_lms)
+ ctl->ops.set_active_lms(ctl, NULL);
+
if (ctl->ops.set_active_fetch_pipes)
ctl->ops.set_active_fetch_pipes(ctl, NULL);
+
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, NULL);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a0ba55ab3c89..0ec6d67c7c70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -69,7 +69,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
- if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
+ phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
phys_enc->hw_pp->idx);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 1c468ca5d692..0ba777bda253 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -313,8 +313,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
- &timing_params, fmt,
- phys_enc->dpu_kms->catalog->mdss_ver);
+ &timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
@@ -378,7 +377,7 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg)
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return !(phys_enc->hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) &&
+ return !(phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5) &&
phys_enc->split_role != ENC_ROLE_SOLO;
}
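The same version test replaces the DPU_CTL_ACTIVE_CFG feature bit in the cmd, vid and wb encoder paths above and below. If it were factored out, a hypothetical helper (the name is invented here) would read:

/* Hypothetical consolidation of the recurring check; CTL ACTIVE
 * configuration exists on DPU core v5.x and newer.
 */
static inline bool sketch_ctl_has_active_cfg(const struct dpu_kms *dpu_kms)
{
	return dpu_kms->catalog->mdss_ver->core_major_ver >= 5;
}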
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 849fea580a4c..56a5b596554d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -218,7 +218,6 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
- struct dpu_hw_ctl *ctl;
struct dpu_hw_cdm *hw_cdm;
if (!phys_enc) {
@@ -227,10 +226,9 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
}
hw_wb = phys_enc->hw_wb;
- ctl = phys_enc->hw_ctl;
hw_cdm = phys_enc->hw_cdm;
- if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
(phys_enc->hw_ctl &&
phys_enc->hw_ctl->ops.setup_intf_cfg)) {
struct dpu_hw_intf_cfg intf_cfg = {0};
@@ -534,7 +532,6 @@ static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
- struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -556,7 +553,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
* WB support is added to those targets will need to add
* the legacy teardown sequence as well.
*/
- if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
dpu_encoder_helper_phys_cleanup(phys_enc);
phys_enc->enable_state = DPU_ENC_DISABLED;
@@ -566,7 +563,6 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
struct drm_writeback_job *job)
{
const struct msm_format *format;
- struct msm_gem_address_space *aspace;
struct dpu_hw_wb_cfg *wb_cfg;
int ret;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
@@ -576,13 +572,12 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
wb_enc->wb_job = job;
wb_enc->wb_conn = job->connector;
- aspace = phys_enc->dpu_kms->base.aspace;
wb_cfg = &wb_enc->wb_cfg;
memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
- ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ ret = msm_framebuffer_prepare(job->fb, false);
if (ret) {
DPU_ERROR("prep fb failed, %d\n", ret);
return;
@@ -596,7 +591,7 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
return;
}
- dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest);
+ dpu_format_populate_addrs(job->fb, &wb_cfg->dest);
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
@@ -619,14 +614,11 @@ static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc
struct drm_writeback_job *job)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
- struct msm_gem_address_space *aspace;
if (!job->fb)
return;
- aspace = phys_enc->dpu_kms->base.aspace;
-
- msm_framebuffer_cleanup(job->fb, aspace, false);
+ msm_framebuffer_cleanup(job->fb, false);
wb_enc->wb_job = NULL;
wb_enc->wb_conn = NULL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 59c9427da7dd..b0d585c5315c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -274,15 +274,14 @@ int dpu_format_populate_plane_sizes(
return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout);
}
-static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+static void _dpu_format_populate_addrs_ubwc(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
const struct msm_format *fmt;
uint32_t base_addr = 0;
bool meta;
- base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ base_addr = msm_framebuffer_iova(fb, 0);
fmt = msm_framebuffer_format(fb);
meta = MSM_FORMAT_IS_UBWC(fmt);
@@ -355,26 +354,23 @@ static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace
}
}
-static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+static void _dpu_format_populate_addrs_linear(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
unsigned int i;
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i)
- layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
-}
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, i);
+}
/**
* dpu_format_populate_addrs - populate buffer addresses based on
* mmu, fb, and format found in the fb
- * @aspace: address space pointer
* @fb: framebuffer pointer
* @layout: format layout structure to populate
*/
-void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+void dpu_format_populate_addrs(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
const struct msm_format *fmt;
@@ -384,7 +380,7 @@ void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
/* Populate the addresses given the fb */
if (MSM_FORMAT_IS_UBWC(fmt) ||
MSM_FORMAT_IS_TILE(fmt))
- _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ _dpu_format_populate_addrs_ubwc(fb, layout);
else
- _dpu_format_populate_addrs_linear(aspace, fb, layout);
+ _dpu_format_populate_addrs_linear(fb, layout);
}
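
With the address-space argument gone, plane addresses come from the framebuffer alone. A compile-standalone sketch of the linear path, with hypothetical stand-in types in place of drm_framebuffer and dpu_hw_fmt_layout:

#include <stdint.h>

#define MAX_PLANES 4

/* Hypothetical stand-ins; the real driver passes drm_framebuffer and
 * dpu_hw_fmt_layout. */
struct fake_fb { uint32_t iova[MAX_PLANES]; };
struct fake_layout { uint32_t num_planes; uint32_t plane_addr[MAX_PLANES]; };

/* Models msm_framebuffer_iova(fb, plane): the VM is reached through
 * the fb's GEM objects rather than a caller-supplied aspace. */
static uint32_t fb_iova(const struct fake_fb *fb, uint32_t plane)
{
	return fb->iova[plane];
}

static void populate_addrs_linear(const struct fake_fb *fb,
				  struct fake_layout *layout)
{
	for (uint32_t i = 0; i < layout->num_planes; ++i)
		layout->plane_addr[i] = fb_iova(fb, i);
}
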
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index c6145d43aa3f..dc03f522e616 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -31,8 +31,7 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
return false;
}
-void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
+void dpu_format_populate_addrs(struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout);
int dpu_format_populate_plane_sizes(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index c878fe196aeb..e824cd64fd3f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -35,12 +35,12 @@
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_NO_SDMA \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
(VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
-#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define VIG_QCM2290_MASK (VIG_BASE_MASK)
#define DMA_MSM8953_MASK \
(BIT(DPU_SSPP_QOS))
@@ -60,7 +60,7 @@
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_SDM845_MASK_NO_SDMA \
- (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ (BIT(DPU_SSPP_QOS) | \
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
@@ -89,39 +89,6 @@
#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT))
-#define MIXER_SDM845_MASK \
- (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-
-#define MIXER_QCM2290_MASK \
- (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-
-#define PINGPONG_MSM8996_MASK \
- (BIT(DPU_PINGPONG_DSC))
-
-#define PINGPONG_SDM845_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-
-#define PINGPONG_SM8150_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-
-#define CTL_SC7280_MASK \
- (BIT(DPU_CTL_ACTIVE_CFG) | \
- BIT(DPU_CTL_FETCH_ACTIVE) | \
- BIT(DPU_CTL_VM_CFG) | \
- BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
-
-#define CTL_SM8550_MASK \
- (CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
-
-#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
-
-#define INTF_SC7180_MASK \
- (BIT(DPU_INTF_INPUT_CTRL) | \
- BIT(DPU_INTF_STATUS_SUPPORTED) | \
- BIT(DPU_DATA_HCTL_EN))
-
-#define INTF_SC7280_MASK (INTF_SC7180_MASK)
-
#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
@@ -131,9 +98,6 @@
BIT(DPU_WB_QOS_8LVL) | \
BIT(DPU_WB_CDP))
-#define WB_SM8250_MASK (WB_SDM845_MASK | \
- BIT(DPU_WB_INPUT_CTRL))
-
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -362,6 +326,9 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
_VIG_SBLK(SSPP_SCALER_VER(3, 3));
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 4));
+
static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK();
static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
@@ -396,6 +363,16 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
},
};
+static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ /* 0x40 + n*0x30 */
+ 0x40, 0x70, 0xa0, 0xd0, 0x100, 0x130, 0x160, 0x190, 0x1c0,
+ 0x1f0, 0x220
+ },
+};
+
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
@@ -417,6 +394,11 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
.len = 0x90, .version = 0x40000},
};
+static const struct dpu_dspp_sub_blks sm8750_dspp_sblk = {
+ .pcc = {.name = "pcc", .base = 0x1700,
+ .len = 0x90, .version = 0x60000},
+};
+
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
@@ -448,6 +430,16 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
.ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
};
+static const struct dpu_dsc_sub_blks sm8750_dsc_sblk_0 = {
+ .enc = {.name = "enc", .base = 0x100, .len = 0x100},
+ .ctl = {.name = "ctl", .base = 0xF00, .len = 0x24},
+};
+
+static const struct dpu_dsc_sub_blks sm8750_dsc_sblk_1 = {
+ .enc = {.name = "enc", .base = 0x200, .len = 0x100},
+ .ctl = {.name = "ctl", .base = 0xF80, .len = 0x24},
+};
+
/*************************************************************
* CDM block config
*************************************************************/
@@ -738,3 +730,4 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_9_2_x1e80100.h"
#include "catalog/dpu_10_0_sm8650.h"
+#include "catalog/dpu_12_0_sm8750.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 01dd6e65f777..a78bb2c334e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -29,26 +29,6 @@
#define MAX_XIN_COUNT 16
/**
- * MDP TOP BLOCK features
- * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
- * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
- * @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
- * in a failure
- * @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register
- * (moved into INTF block since DPU 5.0.0)
- * @DPU_MDP_MAX Maximum value
-
- */
-enum {
- DPU_MDP_PANIC_PER_PIPE = 0x1,
- DPU_MDP_10BIT_SUPPORT,
- DPU_MDP_AUDIO_SELECT,
- DPU_MDP_PERIPH_0_REMOVED,
- DPU_MDP_VSYNC_SEL,
- DPU_MDP_MAX
-};
-
-/**
* SSPP sub-blocks/features
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4)
@@ -57,7 +37,6 @@ enum {
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
* @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
* @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
- * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
* @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
* @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
* @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
@@ -75,7 +54,6 @@ enum {
DPU_SSPP_CSC_10BIT,
DPU_SSPP_CURSOR,
DPU_SSPP_QOS,
- DPU_SSPP_QOS_8LVL,
DPU_SSPP_EXCL_RECT,
DPU_SSPP_SMART_DMA_V1,
DPU_SSPP_SMART_DMA_V2,
@@ -88,20 +66,12 @@ enum {
/*
* MIXER sub-blocks/features
- * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
* @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
- * @DPU_MIXER_GC Gamma correction block
- * @DPU_DIM_LAYER Layer mixer supports dim layer
- * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
* @DPU_MIXER_MAX maximum value
*/
enum {
- DPU_MIXER_LAYER = 0x1,
- DPU_MIXER_SOURCESPLIT,
- DPU_MIXER_GC,
- DPU_DIM_LAYER,
- DPU_MIXER_COMBINED_ALPHA,
- DPU_MIXER_MAX
+ DPU_MIXER_SOURCESPLIT = 0x1,
+ DPU_MIXER_MAX,
};
/**
@@ -114,57 +84,16 @@ enum {
};
/**
- * PINGPONG sub-blocks
- * @DPU_PINGPONG_SPLIT PP block supports split fifo
- * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
- * @DPU_PINGPONG_DITHER Dither blocks
- * @DPU_PINGPONG_DSC PP block supports DSC
- * @DPU_PINGPONG_MAX
- */
-enum {
- DPU_PINGPONG_SPLIT = 0x1,
- DPU_PINGPONG_SLAVE,
- DPU_PINGPONG_DITHER,
- DPU_PINGPONG_DSC,
- DPU_PINGPONG_MAX
-};
-
-/**
* CTL sub-blocks
* @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
- * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
- * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
- * @DPU_CTL_HAS_LAYER_EXT4: CTL has the CTL_LAYER_EXT4 register
- * @DPU_CTL_DSPP_BLOCK_FLUSH: CTL config to support dspp sub-block flush
* @DPU_CTL_MAX
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
- DPU_CTL_ACTIVE_CFG,
- DPU_CTL_FETCH_ACTIVE,
- DPU_CTL_VM_CFG,
- DPU_CTL_HAS_LAYER_EXT4,
- DPU_CTL_DSPP_SUB_BLOCK_FLUSH,
DPU_CTL_MAX
};
/**
- * INTF sub-blocks
- * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
- * pixel data arrives to this INTF
- * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
- * than video timing
- * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
- * @DPU_INTF_MAX
- */
-enum {
- DPU_INTF_INPUT_CTRL = 0x1,
- DPU_DATA_HCTL_EN,
- DPU_INTF_STATUS_SUPPORTED,
- DPU_INTF_MAX
-};
-
-/**
* WB sub-blocks and features
* @DPU_WB_LINE_MODE Writeback module supports line/linear mode
* @DPU_WB_BLOCK_MODE Writeback module supports block mode read
@@ -180,8 +109,6 @@ enum {
* @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
* @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
* @DPU_WB_CDP Writeback supports client driven prefetch
- * @DPU_WB_INPUT_CTRL Writeback supports from which pp block input pixel
- * data arrives.
* @DPU_WB_CROP CWB supports cropping
* @DPU_WB_MAX maximum value
*/
@@ -195,7 +122,6 @@ enum {
DPU_WB_QOS,
DPU_WB_QOS_8LVL,
DPU_WB_CDP,
- DPU_WB_INPUT_CTRL,
DPU_WB_CROP,
DPU_WB_MAX
};
@@ -214,16 +140,11 @@ enum {
/**
* DSC sub-blocks/features
- * @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets
- * the pixel output from this DSC.
- * @DPU_DSC_HW_REV_1_2 DSC block supports DSC 1.1 and 1.2
* @DPU_DSC_NATIVE_42x_EN Supports NATIVE_422_EN and NATIVE_420_EN encoding
* @DPU_DSC_MAX
*/
enum {
- DPU_DSC_OUTPUT_CTRL = 0x1,
- DPU_DSC_HW_REV_1_2,
- DPU_DSC_NATIVE_42x_EN,
+ DPU_DSC_NATIVE_42x_EN = 0x1,
DPU_DSC_MAX
};
@@ -233,14 +154,12 @@ enum {
* @id: enum identifying this block
* @base: register base offset to mdss
* @len: length of hardware block
- * @features bit mask identifying sub-blocks/features
*/
#define DPU_HW_BLK_INFO \
char name[DPU_HW_BLK_NAME_LEN]; \
u32 id; \
u32 base; \
- u32 len; \
- unsigned long features
+ u32 len
/**
* struct dpu_scaler_blk: Scaler information
@@ -455,7 +374,6 @@ struct dpu_clk_ctrl_reg {
/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
* @id: index identifying this block
* @base: register base offset to mdss
- * @features bit mask identifying sub-blocks/features
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@@ -471,6 +389,7 @@ struct dpu_mdp_cfg {
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
unsigned int intr_start;
};
@@ -486,6 +405,7 @@ struct dpu_ctl_cfg {
*/
struct dpu_sspp_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_sspp_sub_blks *sblk;
u32 xin_id;
enum dpu_clk_ctrl_type clk_ctrl;
@@ -503,6 +423,7 @@ struct dpu_sspp_cfg {
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
u32 dspp;
@@ -513,7 +434,6 @@ struct dpu_lm_cfg {
* struct dpu_dspp_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * supported by this block
* @sblk sub-blocks information
*/
@@ -526,7 +446,6 @@ struct dpu_dspp_cfg {
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* @intr_done: index for PINGPONG done interrupt
* @intr_rdptr: index for PINGPONG readpointer done interrupt
* @sblk sub-blocks information
@@ -543,8 +462,6 @@ struct dpu_pingpong_cfg {
* struct dpu_merge_3d_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * supported by this block
* @sblk sub-blocks information
*/
struct dpu_merge_3d_cfg {
@@ -562,6 +479,7 @@ struct dpu_merge_3d_cfg {
*/
struct dpu_dsc_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
const struct dpu_dsc_sub_blks *sblk;
};
@@ -569,7 +487,6 @@ struct dpu_dsc_cfg {
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
* @type: Interface type(DSI, DP, HDMI)
* @controller_id: Controller Instance ID in case of multiple of intf type
* @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
@@ -600,6 +517,7 @@ struct dpu_intf_cfg {
*/
struct dpu_wb_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
u8 vbif_idx;
u32 maxlinewidth;
u32 xin_id;
@@ -668,6 +586,7 @@ struct dpu_vbif_qos_tbl {
*/
struct dpu_vbif_cfg {
DPU_HW_BLK_INFO;
+ unsigned long features;
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
u32 xin_halt_timeout;
@@ -685,7 +604,6 @@ struct dpu_vbif_cfg {
* @name string name for debug purposes
* @id enum identifying this block
* @base register offset of this block
- * @features bit mask identifying sub-blocks/features
*/
struct dpu_cdm_cfg {
DPU_HW_BLK_INFO;
@@ -860,6 +778,7 @@ extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sa8775p_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8750_cfg;
extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 573e42b06ad0..ac834db2e4c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -42,6 +42,8 @@
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
+#define CTL_PIPE_ACTIVE 0x12c
+#define CTL_LAYER_ACTIVE 0x130
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
@@ -64,6 +66,8 @@ static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
1, 2, 3, 4, 5};
+static const u32 lm_tbl[LM_MAX] = {CTL_INVALID_BIT, 0, 1, 2, 3, 4, 5, 6, 7};
+
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
{
@@ -555,7 +559,7 @@ exit:
DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
- if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
+ if (ctx->mdss_ver->core_major_ver >= 9)
DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}
@@ -575,7 +579,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
* per VM. Explicitly disable it until VM support is
* added in SW. Power on reset value is not disable.
*/
- if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ if (ctx->mdss_ver->core_major_ver >= 7)
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
@@ -676,11 +680,18 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
merge3d_active);
}
- dpu_hw_ctl_clear_all_blendstages(ctx);
+ if (ctx->ops.clear_all_blendstages)
+ ctx->ops.clear_all_blendstages(ctx);
+
+ if (ctx->ops.set_active_lms)
+ ctx->ops.set_active_lms(ctx, NULL);
if (ctx->ops.set_active_fetch_pipes)
ctx->ops.set_active_fetch_pipes(ctx, NULL);
+ if (ctx->ops.set_active_pipes)
+ ctx->ops.set_active_pipes(ctx, NULL);
+
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
@@ -737,55 +748,39 @@ static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}
-static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
- unsigned long cap)
+static void dpu_hw_ctl_set_active_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *active_pipes)
{
- if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
- ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
- ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
- ops->update_pending_flush_intf =
- dpu_hw_ctl_update_pending_flush_intf_v1;
+ int i;
+ u32 val = 0;
- ops->update_pending_flush_periph =
- dpu_hw_ctl_update_pending_flush_periph_v1;
+ if (active_pipes) {
+ for (i = 0; i < SSPP_MAX; i++) {
+ if (test_bit(i, active_pipes) &&
+ fetch_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(fetch_tbl[i]);
+ }
+ }
- ops->update_pending_flush_merge_3d =
- dpu_hw_ctl_update_pending_flush_merge_3d_v1;
- ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
- ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
- ops->update_pending_flush_dsc =
- dpu_hw_ctl_update_pending_flush_dsc_v1;
- ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
- } else {
- ops->trigger_flush = dpu_hw_ctl_trigger_flush;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
- ops->update_pending_flush_intf =
- dpu_hw_ctl_update_pending_flush_intf;
- ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
- ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
+ DPU_REG_WRITE(&ctx->hw, CTL_PIPE_ACTIVE, val);
+}
+
+static void dpu_hw_ctl_set_active_lms(struct dpu_hw_ctl *ctx,
+ unsigned long *active_lms)
+{
+ int i;
+ u32 val = 0;
+
+ if (active_lms) {
+ for (i = LM_0; i < LM_MAX; i++) {
+ if (test_bit(i, active_lms) &&
+ lm_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(lm_tbl[i]);
+ }
}
- ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
- ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
- ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
- ops->get_flush_register = dpu_hw_ctl_get_flush_register;
- ops->trigger_start = dpu_hw_ctl_trigger_start;
- ops->is_started = dpu_hw_ctl_is_started;
- ops->trigger_pending = dpu_hw_ctl_trigger_pending;
- ops->reset = dpu_hw_ctl_reset_control;
- ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
- ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
- ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
- ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
- ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
- if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
- ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
- else
- ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
- if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
- ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
-};
+ DPU_REG_WRITE(&ctx->hw, CTL_LAYER_ACTIVE, val);
+}
/**
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
@@ -793,12 +788,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
* @dev: Corresponding device for devres management
* @cfg: ctl_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_ver: dpu core's major and minor versions
* @mixer_count: Number of mixers in @mixer
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver,
u32 mixer_count,
const struct dpu_lm_cfg *mixer)
{
@@ -812,7 +809,59 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
c->hw.log_mask = DPU_DBG_MASK_CTL;
c->caps = cfg;
- _setup_ctl_ops(&c->ops, c->caps->features);
+ c->mdss_ver = mdss_ver;
+
+ if (mdss_ver->core_major_ver >= 5) {
+ c->ops.trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ c->ops.reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
+ c->ops.update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf_v1;
+
+ c->ops.update_pending_flush_periph =
+ dpu_hw_ctl_update_pending_flush_periph_v1;
+
+ c->ops.update_pending_flush_merge_3d =
+ dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ c->ops.update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
+ c->ops.update_pending_flush_dsc =
+ dpu_hw_ctl_update_pending_flush_dsc_v1;
+ c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
+ } else {
+ c->ops.trigger_flush = dpu_hw_ctl_trigger_flush;
+ c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ c->ops.update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf;
+ c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
+ }
+ c->ops.clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ c->ops.update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ c->ops.get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ c->ops.get_flush_register = dpu_hw_ctl_get_flush_register;
+ c->ops.trigger_start = dpu_hw_ctl_trigger_start;
+ c->ops.is_started = dpu_hw_ctl_is_started;
+ c->ops.trigger_pending = dpu_hw_ctl_trigger_pending;
+ c->ops.reset = dpu_hw_ctl_reset_control;
+ c->ops.wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ if (mdss_ver->core_major_ver < 12) {
+ c->ops.clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ c->ops.setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ } else {
+ c->ops.set_active_pipes = dpu_hw_ctl_set_active_pipes;
+ c->ops.set_active_lms = dpu_hw_ctl_set_active_lms;
+ }
+ c->ops.update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ c->ops.update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ if (mdss_ver->core_major_ver >= 7)
+ c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
+ else
+ c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
+
+ if (mdss_ver->core_major_ver >= 7)
+ c->ops.set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
+
c->idx = cfg->id;
c->mixer_count = mixer_count;
c->mixer_hw_caps = mixer;
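
dpu_hw_ctl_init() now derives every op choice from the core revision. Collected as predicates for reference (a sketch: the function names are hypothetical, the thresholds are the ones visible in the hunk above):

#include <stdbool.h>
#include <stdint.h>

struct dpu_mdss_version { uint8_t core_major_ver, core_minor_ver; };

/* DPU >= 5: v1 (active-configuration style) flush and intf ops. */
static bool ctl_uses_v1_ops(const struct dpu_mdss_version *v)
{
	return v->core_major_ver >= 5;
}

/* DPU >= 7: VM group id in intf_cfg, DSPP sub-block flush, active
 * fetch pipes. */
static bool ctl_has_vm_and_fetch_active(const struct dpu_mdss_version *v)
{
	return v->core_major_ver >= 7;
}

/* DPU >= 9: the CTL_LAYER_EXT4 register exists. */
static bool ctl_has_layer_ext4(const struct dpu_mdss_version *v)
{
	return v->core_major_ver >= 9;
}

/* DPU >= 12: blend stages are programmed in the LM instead; the CTL
 * gains set_active_pipes()/set_active_lms() (CTL_PIPE_ACTIVE and
 * CTL_LAYER_ACTIVE) and drops the blend-stage ops. */
static bool ctl_uses_active_lists(const struct dpu_mdss_version *v)
{
	return v->core_major_ver >= 12;
}
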
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index feb09590bc8f..15931b22ec94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -258,6 +258,23 @@ struct dpu_hw_ctl_ops {
void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
+
+ /**
+ * Set active pipes attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @active_pipes: bitmap of enum dpu_sspp
+ */
+ void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ unsigned long *active_pipes);
+
+ /**
+ * Set active layer mixers attached to this CTL
+ * @ctx: ctl path ctx pointer
+ * @active_lms: bitmap of enum dpu_lm
+ */
+ void (*set_active_lms)(struct dpu_hw_ctl *ctx,
+ unsigned long *active_lms);
+
};
/**
@@ -274,6 +291,7 @@ struct dpu_hw_ctl_ops {
* @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
+ * @mdss_ver: MDSS revision information
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -295,6 +313,8 @@ struct dpu_hw_ctl {
u32 pending_dsc_flush_mask;
u32 pending_cdm_flush_mask;
+ const struct dpu_mdss_version *mdss_ver;
+
/* ops */
struct dpu_hw_ctl_ops ops;
};
@@ -312,6 +332,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver,
u32 mixer_count,
const struct dpu_lm_cfg *mixer);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index cec6d4e8baec..3a149caa7ff4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -181,26 +181,18 @@ static void dpu_hw_dsc_bind_pingpong_blk(
DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
}
-static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
- unsigned long cap)
-{
- ops->dsc_disable = dpu_hw_dsc_disable;
- ops->dsc_config = dpu_hw_dsc_config;
- ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
- if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
- ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
-};
-
/**
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
* @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
+ * @mdss_ver: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_dsc context
*/
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_dsc *c;
@@ -213,7 +205,12 @@ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_dsc_ops(&c->ops, c->caps->features);
+
+ c->ops.dsc_disable = dpu_hw_dsc_disable;
+ c->ops.dsc_config = dpu_hw_dsc_config;
+ c->ops.dsc_config_thresh = dpu_hw_dsc_config_thresh;
+ if (mdss_ver->core_major_ver >= 5)
+ c->ops.dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index fc171bdeca48..b7013c9822d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -64,7 +64,8 @@ struct dpu_hw_dsc {
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver);
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index b9c433567262..b3395e9c34a1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -360,8 +360,7 @@ static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
}
-static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
- const unsigned long features)
+static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops)
{
ops->dsc_disable = dpu_hw_dsc_disable_1_2;
ops->dsc_config = dpu_hw_dsc_config_1_2;
@@ -391,7 +390,7 @@ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_dcs_ops_1_2(&c->ops, c->caps->features);
+ _setup_dcs_ops_1_2(&c->ops);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 829ca272873e..11fb1bc54fa9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -63,13 +63,6 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
}
-static void _setup_dspp_ops(struct dpu_hw_dspp *c,
- unsigned long features)
-{
- if (test_bit(DPU_DSPP_PCC, &features))
- c->ops.setup_pcc = dpu_setup_dspp_pcc;
-}
-
/**
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
* should be called once before accessing every DSPP.
@@ -97,7 +90,8 @@ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
- _setup_dspp_ops(c, c->cap->features);
+ if (c->cap->sblk->pcc.base)
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index fb1d25baa518..a80ac82a9625 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -98,8 +98,7 @@
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct msm_format *fmt,
- const struct dpu_mdss_version *mdss_ver)
+ const struct msm_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 hsync_period, vsync_period;
@@ -180,7 +179,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
/* TODO: handle DSC+DP case, we only handle DSC+DSI case so far */
if (p->compression_en && !dp_intf &&
- mdss_ver->core_major_ver >= 7)
+ intf->mdss_ver->core_major_ver >= 7)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
hsync_data_start_x = hsync_start_x;
@@ -238,7 +237,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
- if (intf->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ if (intf->mdss_ver->core_major_ver >= 5) {
/*
* DATA_HCTL_EN controls data timing which can be different from
* video timing. It is recommended to enable it for all cases, except
@@ -309,9 +308,8 @@ static void dpu_hw_intf_get_status(
struct dpu_hw_intf_status *s)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
- unsigned long cap = intf->cap->features;
- if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
+ if (intf->mdss_ver->core_major_ver >= 5)
s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
else
s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
@@ -580,6 +578,8 @@ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
c->idx = cfg->id;
c->cap = cfg;
+ c->mdss_ver = mdss_rev;
+
c->ops.setup_timing_gen = dpu_hw_intf_setup_timing_engine;
c->ops.setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
c->ops.get_status = dpu_hw_intf_get_status;
@@ -588,7 +588,7 @@ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
c->ops.setup_misr = dpu_hw_intf_setup_misr;
c->ops.collect_misr = dpu_hw_intf_collect_misr;
- if (cfg->features & BIT(DPU_INTF_INPUT_CTRL))
+ if (mdss_rev->core_major_ver >= 5)
c->ops.bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
/* INTF TE is only for DSI interfaces */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 114be272ac0a..f31067a9aaf1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -81,8 +81,7 @@ struct dpu_hw_intf_cmd_mode_cfg {
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
- const struct msm_format *fmt,
- const struct dpu_mdss_version *mdss_ver);
+ const struct msm_format *fmt);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_prog_fetch *fetch);
@@ -126,6 +125,8 @@ struct dpu_hw_intf {
enum dpu_intf idx;
const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_version *mdss_ver;
+
/* ops */
struct dpu_hw_intf_ops ops;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 81b56f066519..e8a76d5192c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -19,12 +19,28 @@
/* These register are offset to mixer base + stage base */
#define LM_BLEND0_OP 0x00
+
+/* <v12 DPU with offset to mixer base + stage base */
#define LM_BLEND0_CONST_ALPHA 0x04
#define LM_FG_COLOR_FILL_COLOR_0 0x08
#define LM_FG_COLOR_FILL_COLOR_1 0x0C
#define LM_FG_COLOR_FILL_SIZE 0x10
#define LM_FG_COLOR_FILL_XY 0x14
+/* >= v12 DPU */
+#define LM_BG_SRC_SEL_V12 0x14
+#define LM_BG_SRC_SEL_V12_RESET_VALUE 0x0000c0c0
+#define LM_BORDER_COLOR_0_V12 0x1c
+#define LM_BORDER_COLOR_1_V12 0x20
+
+/* >= v12 DPU with offset to mixer base + stage base */
+#define LM_BLEND0_FG_SRC_SEL_V12 0x04
+#define LM_BLEND0_CONST_ALPHA_V12 0x08
+#define LM_FG_COLOR_FILL_COLOR_0_V12 0x0c
+#define LM_FG_COLOR_FILL_COLOR_1_V12 0x10
+#define LM_FG_COLOR_FILL_SIZE_V12 0x14
+#define LM_FG_COLOR_FILL_XY_V12 0x18
+
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
@@ -83,6 +99,22 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}
+static void dpu_hw_lm_setup_border_color_v12(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0_V12,
+ (color->color_0 & 0x3ff) |
+ ((color->color_1 & 0x3ff) << 16));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1_V12,
+ (color->color_2 & 0x3ff) |
+ ((color->color_3 & 0x3ff) << 16));
+ }
+}
+
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
{
dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
@@ -112,6 +144,27 @@ static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
+static void
+dpu_hw_lm_setup_blend_config_combined_alpha_v12(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha,
+ u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0x3ff) | ((fg_alpha & 0x3ff) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA_V12 + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
@@ -144,18 +197,146 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
- unsigned long features)
+static void dpu_hw_lm_setup_color3_v12(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode, stages, stage_off, i;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return;
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ /* set color_out3 bit in blend0_op when enabled in mixer_op_mode */
+ op_mode = DPU_REG_READ(c, LM_BLEND0_OP + stage_off);
+ if (mixer_op_mode & BIT(i))
+ op_mode |= BIT(30);
+ else
+ op_mode &= ~BIT(30);
+
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, op_mode);
+ }
+}
+
+static int _set_staged_sspp(u32 stage, struct dpu_hw_stage_cfg *stage_cfg,
+ int pipes_per_stage, u32 *value)
{
- ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ int i;
+ u32 pipe_type = 0, pipe_id = 0, rec_id = 0;
+ u32 src_sel[PIPES_PER_STAGE];
+
+ *value = LM_BG_SRC_SEL_V12_RESET_VALUE;
+ if (!stage_cfg || !pipes_per_stage)
+ return 0;
+
+ for (i = 0; i < pipes_per_stage; i++) {
+ enum dpu_sspp pipe = stage_cfg->stage[stage][i];
+ enum dpu_sspp_multirect_index rect_index = stage_cfg->multirect_index[stage][i];
+
+ src_sel[i] = LM_BG_SRC_SEL_V12_RESET_VALUE;
+
+ if (!pipe)
+ continue;
+
+ /* translate pipe data to SWI pipe_type, pipe_id */
+ if (pipe >= SSPP_DMA0 && pipe <= SSPP_DMA5) {
+ pipe_type = 0;
+ pipe_id = pipe - SSPP_DMA0;
+ } else if (pipe >= SSPP_VIG0 && pipe <= SSPP_VIG3) {
+ pipe_type = 1;
+ pipe_id = pipe - SSPP_VIG0;
+ } else {
+ DPU_ERROR("invalid rec-%d pipe:%d\n", i, pipe);
+ return -EINVAL;
+ }
+
+ /* translate rec data to SWI rec_id */
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ rec_id = 0;
+ } else if (rect_index == DPU_SSPP_RECT_1) {
+ rec_id = 1;
+ } else {
+ DPU_ERROR("invalid rec-%d rect_index:%d\n", i, rect_index);
+ rec_id = 0;
+ }
+
+ /* calculate SWI value for rec-0 and rec-1 and store it in a temporary buffer */
+ src_sel[i] = (((pipe_type & 0x3) << 6) | ((rec_id & 0x3) << 4) | (pipe_id & 0xf));
+ }
+
+ /* calculate final SWI register value for rec-0 and rec-1 */
+ *value = 0;
+ for (i = 0; i < pipes_per_stage; i++)
+ *value |= src_sel[i] << (i * 8);
+
+ return 0;
+}
+
+static int dpu_hw_lm_setup_blendstage(struct dpu_hw_mixer *ctx, enum dpu_lm lm,
+ struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i, ret, stages, stage_off, pipes_per_stage;
+ u32 value;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return -EINVAL;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT, &ctx->cap->features))
+ pipes_per_stage = PIPES_PER_STAGE;
else
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
- ops->setup_alpha_out = dpu_hw_lm_setup_color3;
- ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_misr = dpu_hw_lm_setup_misr;
- ops->collect_misr = dpu_hw_lm_collect_misr;
+ pipes_per_stage = 1;
+
+ /*
+ * When stage configuration is empty, we can enable the
+ * border color by setting the corresponding LAYER_ACTIVE bit
+ * and un-staging all the pipes from the layer mixer.
+ */
+ if (!stage_cfg)
+ DPU_REG_WRITE(c, LM_BG_SRC_SEL_V12, LM_BG_SRC_SEL_V12_RESET_VALUE);
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (stage_off < 0)
+ return stage_off;
+
+ ret = _set_staged_sspp(i, stage_cfg, pipes_per_stage, &value);
+ if (ret)
+ return ret;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_SRC_SEL_V12 + stage_off, value);
+ }
+
+ return 0;
+}
+
+static int dpu_hw_lm_clear_all_blendstages(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i, stages, stage_off;
+
+ stages = ctx->cap->sblk->maxblendstages;
+ if (stages <= 0)
+ return -EINVAL;
+
+ DPU_REG_WRITE(c, LM_BG_SRC_SEL_V12, LM_BG_SRC_SEL_V12_RESET_VALUE);
+
+ for (i = DPU_STAGE_0; i <= stages; i++) {
+ stage_off = _stage_offset(ctx, i);
+ if (stage_off < 0)
+ return stage_off;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_SRC_SEL_V12 + stage_off,
+ LM_BG_SRC_SEL_V12_RESET_VALUE);
+ }
+
+ return 0;
}
/**
@@ -164,10 +345,12 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
* @dev: Corresponding device for devres management
* @cfg: mixer catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_ver: DPU core's major and minor versions
*/
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_mixer *c;
@@ -186,7 +369,24 @@ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
- _setup_mixer_ops(&c->ops, c->cap->features);
+ c->ops.setup_mixer_out = dpu_hw_lm_setup_out;
+ if (mdss_ver->core_major_ver >= 12)
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha_v12;
+ else if (mdss_ver->core_major_ver >= 4)
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ else
+ c->ops.setup_blend_config = dpu_hw_lm_setup_blend_config;
+ if (mdss_ver->core_major_ver < 12) {
+ c->ops.setup_alpha_out = dpu_hw_lm_setup_color3;
+ c->ops.setup_border_color = dpu_hw_lm_setup_border_color;
+ } else {
+ c->ops.setup_alpha_out = dpu_hw_lm_setup_color3_v12;
+ c->ops.setup_blendstage = dpu_hw_lm_setup_blendstage;
+ c->ops.clear_all_blendstages = dpu_hw_lm_clear_all_blendstages;
+ c->ops.setup_border_color = dpu_hw_lm_setup_border_color_v12;
+ }
+ c->ops.setup_misr = dpu_hw_lm_setup_misr;
+ c->ops.collect_misr = dpu_hw_lm_collect_misr;
return c;
}
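
Two register encodings introduced for the v12 mixers, reduced to standalone helpers (a sketch; only the bit layout is taken from the hunks above):

#include <stdint.h>

/* _set_staged_sspp(): one byte per staged rect,
 * [7:6] pipe type (0 = DMA, 1 = VIG), [5:4] rect id,
 * [3:0] pipe instance. 0xc0 is the unstaged byte, hence the
 * 0x0000c0c0 reset value when both rect slots are empty. */
static uint32_t lm_src_sel_byte(uint32_t pipe_type, uint32_t rec_id,
				uint32_t pipe_id)
{
	return ((pipe_type & 0x3) << 6) | ((rec_id & 0x3) << 4) |
	       (pipe_id & 0xf);
}

/* dpu_hw_lm_setup_blend_config_combined_alpha_v12(): 10-bit constant
 * alpha values, background in the low half-word, foreground in the
 * high half-word. */
static uint32_t lm_const_alpha_v12(uint32_t fg_alpha, uint32_t bg_alpha)
{
	return (bg_alpha & 0x3ff) | ((fg_alpha & 0x3ff) << 16);
}
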
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 6f60fa9b3cd7..1b9ecd082d7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -11,6 +11,7 @@
#include "dpu_hw_util.h"
struct dpu_hw_mixer;
+struct dpu_hw_stage_cfg;
struct dpu_hw_mixer_cfg {
u32 out_width;
@@ -49,6 +50,23 @@ struct dpu_hw_lm_ops {
void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
/**
+ * Clear layer mixer to pipe configuration
+ * @ctx : mixer ctx pointer
+ * Returns: 0 on success or -error
+ */
+ int (*clear_all_blendstages)(struct dpu_hw_mixer *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : mixer ctx pointer
+ * @lm : layer mixer enumeration
+ * @stage_cfg : blend stage configuration
+ * Returns: 0 on success or -error
+ */
+ int (*setup_blendstage)(struct dpu_hw_mixer *ctx, enum dpu_lm lm,
+ struct dpu_hw_stage_cfg *stage_cfg);
+
+ /**
* setup_border_color : enable/disable border color
*/
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
@@ -95,6 +113,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_ver);
#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index 0b3325f9c870..83b1dbecddd2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -33,8 +33,7 @@ static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
}
}
-static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
- unsigned long features)
+static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c)
{
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
@@ -62,7 +61,7 @@ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
c->idx = cfg->id;
c->caps = cfg;
- _setup_merge_3d_ops(c, c->caps->features);
+ _setup_merge_3d_ops(c);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 36c0ec775b92..138071be5649 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -319,13 +319,13 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
- if (test_bit(DPU_PINGPONG_DSC, &cfg->features)) {
+ if (mdss_rev->core_major_ver < 7) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
- if (test_bit(DPU_PINGPONG_DITHER, &cfg->features))
+ if (mdss_rev->core_major_ver >= 3)
c->ops.setup_dither = dpu_hw_pp_setup_dither;
return c;
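
Note the PINGPONG gate runs the other way from most of the series: only cores before 7.x drive DSC through the PINGPONG block, since DPU >= 7.x uses the separate DSC 1.2 ops (dpu_hw_dsc_1_2.c); dither has been present since 3.x. A sketch with stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct dpu_mdss_version { uint8_t core_major_ver, core_minor_ver; };

static bool pp_drives_dsc(const struct dpu_mdss_version *v)
{
	return v->core_major_ver < 7;	/* 7.x+ has DSC 1.2 blocks */
}

static bool pp_has_dither(const struct dpu_mdss_version *v)
{
	return v->core_major_ver >= 3;
}
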
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 32c7c8084553..6f1fc790ad6d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -10,11 +10,11 @@
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
-#include "msm_mdss.h"
-
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
+#include <linux/soc/qcom/ubwc.h>
+
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* SSPP registers */
@@ -543,7 +543,7 @@ static void dpu_hw_sspp_setup_qos_lut(struct dpu_hw_sspp *ctx,
return;
_dpu_hw_setup_qos_lut(&ctx->hw, SSPP_DANGER_LUT,
- test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features),
+ ctx->mdss_ver->core_major_ver >= 4,
cfg);
}
@@ -684,7 +684,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
@@ -703,6 +703,9 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
hw_pipe->ubwc = mdss_data;
hw_pipe->idx = cfg->id;
hw_pipe->cap = cfg;
+
+ hw_pipe->mdss_ver = mdss_rev;
+
_setup_layer_ops(hw_pipe, hw_pipe->cap->features, mdss_rev);
return hw_pipe;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 56a0edf2a57c..bdac5c04bf79 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -308,12 +308,14 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_sspp {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
- const struct msm_mdss_data *ubwc;
+ const struct qcom_ubwc_cfg_data *ubwc;
/* Pipe */
enum dpu_sspp idx;
const struct dpu_sspp_cfg *cap;
+ const struct dpu_mdss_version *mdss_ver;
+
/* Ops */
struct dpu_hw_sspp_ops ops;
};
@@ -323,7 +325,7 @@ struct dpu_kms;
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
const struct dpu_mdss_version *mdss_rev);
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 562a3f4c5238..96dc10589bee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -264,15 +264,15 @@ static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
}
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
- unsigned long cap, const struct dpu_mdss_version *mdss_rev)
+ const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
- if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ if (mdss_rev->core_major_ver < 5)
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
- else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
+ else if (mdss_rev->core_major_ver < 8)
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;
@@ -280,7 +280,8 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (mdss_rev->core_major_ver >= 5)
ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
- if (cap & BIT(DPU_MDP_AUDIO_SELECT))
+ if (mdss_rev->core_major_ver == 4 ||
+ mdss_rev->core_major_ver == 5)
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@@ -312,7 +313,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
- _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
+ _setup_mdp_ops(&mdp->ops, mdss_rev);
return mdp;
}
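
The MDP-top conversion folds three feature bits into one revision ladder. A sketch of the resulting vsync-source selection (the enum and function names are illustrative):

#include <stdint.h>

enum vsync_src_op {
	VSYNC_SEL_REG,	/* pre-5.0: MDP_VSYNC_SEL register (was DPU_MDP_VSYNC_SEL) */
	VSYNC_WD_TIMER,	/* 5.x-7.x: watchdog-timer based */
	VSYNC_NONE,	/* 8.x+: periph top0 removed, no MDP-level op */
};

static enum vsync_src_op pick_vsync_op(uint8_t core_major_ver)
{
	if (core_major_ver < 5)
		return VSYNC_SEL_REG;
	if (core_major_ver < 8)
		return VSYNC_WD_TIMER;
	return VSYNC_NONE;
}
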
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 4853e516c487..478a091aeccf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -208,7 +208,7 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
if (test_bit(DPU_WB_CDP, &features))
ops->setup_cdp = dpu_hw_wb_setup_cdp;
- if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ if (mdss_rev->core_major_ver >= 5)
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
if (mdss_rev->core_major_ver >= 9)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 1fd82b6747e9..12dcb32b4724 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -20,9 +20,10 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_drv.h"
#include "msm_mmu.h"
-#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
@@ -582,7 +583,7 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_display_info info;
int i, rc = 0;
- if (!(priv->dsi[0] || priv->dsi[1]))
+ if (!(priv->kms->dsi[0] || priv->kms->dsi[1]))
return rc;
/*
@@ -593,26 +594,26 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
*
* TODO: Support swapping DSI0 and DSI1 in the bonded setup.
*/
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ for (i = 0; i < ARRAY_SIZE(priv->kms->dsi); i++) {
int other = (i + 1) % 2;
- if (!priv->dsi[i])
+ if (!priv->kms->dsi[i])
continue;
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
- !msm_dsi_is_master_dsi(priv->dsi[i]))
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->kms->dsi[i]))
continue;
memset(&info, 0, sizeof(info));
info.intf_type = INTF_DSI;
info.h_tile_instance[info.num_of_h_tiles++] = i;
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]))
info.h_tile_instance[info.num_of_h_tiles++] = other;
- info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
+ info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->kms->dsi[i]);
- rc = dpu_kms_dsi_set_te_source(&info, priv->dsi[i]);
+ rc = dpu_kms_dsi_set_te_source(&info, priv->kms->dsi[i]);
if (rc) {
DPU_ERROR("failed to identify TE source for dsi display\n");
return rc;
@@ -624,15 +625,15 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ rc = msm_dsi_modeset_init(priv->kms->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
break;
}
- if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
- rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) && priv->kms->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->kms->dsi[other], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
other, rc);
@@ -654,8 +655,8 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
int rc;
int i;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (!priv->dp[i])
+ for (i = 0; i < ARRAY_SIZE(priv->kms->dp); i++) {
+ if (!priv->kms->dp[i])
continue;
memset(&info, 0, sizeof(info));
@@ -670,7 +671,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
}
yuv_supported = !!dpu_kms->catalog->cdm;
- rc = msm_dp_modeset_init(priv->dp[i], dev, encoder, yuv_supported);
+ rc = msm_dp_modeset_init(priv->kms->dp[i], dev, encoder, yuv_supported);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
return rc;
@@ -688,7 +689,7 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
struct msm_display_info info;
int rc;
- if (!priv->hdmi)
+ if (!priv->kms->hdmi)
return 0;
memset(&info, 0, sizeof(info));
@@ -702,7 +703,7 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ rc = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for HDMI, rc = %d\n", rc);
return rc;
@@ -874,12 +875,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
ret = PTR_ERR(crtc);
return ret;
}
- priv->num_crtcs++;
}
/* All CRTCs are compatible with all encoders */
drm_for_each_encoder(encoder, dev)
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
return 0;
}
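
With per-driver CRTC counting gone, the encoder mask is derived from dev->mode_config.num_crtc: every encoder may drive any CRTC, so the mask is simply the low num_crtc bits. A runnable check of the arithmetic:

#include <assert.h>
#include <stdint.h>

static uint32_t all_crtcs_mask(unsigned int num_crtc)
{
	return (1u << num_crtc) - 1;
}

int main(void)
{
	assert(all_crtcs_mask(1) == 0x1);
	assert(all_crtcs_mask(4) == 0xf);	/* CRTCs 0-3 */
	return 0;
}
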
@@ -1022,7 +1022,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->wb[i].base, "%s",
cat->wb[i].name);
- if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
+ if (dpu_kms->catalog->mdss_ver->core_major_ver >= 8) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
dpu_kms->mmio + cat->mdp[0].base, "top");
msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
@@ -1043,7 +1043,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
"%s", cat->dsc[i].name);
- if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
+ if (cat->mdss_ver->core_major_ver >= 7) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
@@ -1095,26 +1095,26 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
- if (!dpu_kms->base.aspace)
+ if (!dpu_kms->base.vm)
return;
- mmu = dpu_kms->base.aspace->mmu;
+ mmu = to_msm_vm(dpu_kms->base.vm)->mmu;
mmu->funcs->detach(mmu);
- msm_gem_address_space_put(dpu_kms->base.aspace);
+ drm_gpuvm_put(dpu_kms->base.vm);
- dpu_kms->base.aspace = NULL;
+ dpu_kms->base.vm = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
- aspace = msm_kms_init_aspace(dpu_kms->dev);
- if (IS_ERR(aspace))
- return PTR_ERR(aspace);
+ vm = msm_kms_init_vm(dpu_kms->dev);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
- dpu_kms->base.aspace = aspace;
+ dpu_kms->base.vm = vm;
return 0;
}
@@ -1189,10 +1189,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
- dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
+ dpu_kms->mdss = qcom_ubwc_config_get_data();
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
- DPU_ERROR("failed to get MDSS data: %d\n", rc);
+ DPU_ERROR("failed to get UBWC config data: %d\n", rc);
goto err_pm_put;
}
@@ -1533,6 +1533,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
+ { .compatible = "qcom,sm8750-dpu", .data = &dpu_sm8750_cfg, },
{ .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
{}
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index a57ec2ec1060..993cf512f8c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -60,7 +60,7 @@ struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
const struct dpu_mdss_cfg *catalog;
- const struct msm_mdss_data *mdss;
+ const struct qcom_ubwc_cfg_data *mdss;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 421138bc3cb7..01171c535a27 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -17,8 +17,9 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_drv.h"
-#include "msm_mdss.h"
#include "dpu_kms.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
@@ -71,7 +72,7 @@ static const uint32_t qcom_compressed_supported_formats[] = {
/*
* struct dpu_plane - local dpu plane structure
- * @aspace: address space pointer
+ * @vm: address space pointer
* @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
@@ -646,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -654,9 +654,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
- /* cache aspace */
- pstate->aspace = kms->base.aspace;
-
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
* we can use msm_atomic_prepare_fb() instead of doing the
@@ -664,13 +661,10 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
*/
drm_gem_plane_helper_prepare_fb(plane, new_state);
- if (pstate->aspace) {
- ret = msm_framebuffer_prepare(new_state->fb,
- pstate->aspace, pstate->needs_dirtyfb);
- if (ret) {
- DPU_ERROR("failed to prepare framebuffer\n");
- return ret;
- }
+ ret = msm_framebuffer_prepare(new_state->fb, pstate->needs_dirtyfb);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
}
return 0;
@@ -689,8 +683,7 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
- msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace,
- old_pstate->needs_dirtyfb);
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->needs_dirtyfb);
}
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
@@ -1457,7 +1450,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
pdpu->is_rt_pipe = is_rt_pipe;
- dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);
+ dpu_format_populate_addrs(new_state->fb, &pstate->layout);
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index acd5725175cd..a3a6e9028333 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -17,7 +17,6 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @aspace: pointer to address space for input/output buffers
* @pipe: software pipe description
* @r_pipe: software pipe description of the second pipe
* @pipe_cfg: software pipe configuration
@@ -34,7 +33,6 @@
*/
struct dpu_plane_state {
struct drm_plane_state base;
- struct msm_gem_address_space *aspace;
struct dpu_sw_pipe pipe;
struct dpu_sw_pipe r_pipe;
struct dpu_sw_pipe_cfg pipe_cfg;
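With the cached aspace removed from the plane state, the framebuffer helpers locate the KMS VM on their own. The signatures implied by the updated call sites (a sketch reconstructed from this series, not quoted from the header):

/* Prototypes as implied by the callers above: no address-space
 * argument; the helpers resolve the KMS VM internally. */
int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
uint64_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);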
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 2e296f79cba1..25382120cb1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -40,7 +40,7 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
void __iomem *mmio)
{
int rc, i;
@@ -60,7 +60,7 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
- hw = dpu_hw_lm_init(dev, lm, mmio);
+ hw = dpu_hw_lm_init(dev, lm, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
@@ -142,7 +142,7 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
- hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
+ hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mdss_ver, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
@@ -168,10 +168,10 @@ int dpu_rm_init(struct drm_device *dev,
struct dpu_hw_dsc *hw;
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
- if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
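+ /* Per the catalog, DSC 1.2 encoder blocks ship from DPU 7.x on, so
+ * the core major version stands in for the old DPU_DSC_HW_REV_1_2
+ * feature flag (assumed mapping, matching the snapshot change above). */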
+ if (cat->mdss_ver->core_major_ver >= 7)
hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
else
- hw = dpu_hw_dsc_init(dev, dsc, mmio);
+ hw = dpu_hw_dsc_init(dev, dsc, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index aa62966056d4..ccd64404f12d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -69,7 +69,7 @@ struct msm_display_topology {
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
- const struct msm_mdss_data *mdss_data,
+ const struct qcom_ubwc_cfg_data *mdss_data,
void __iomem *mmio);
int dpu_rm_reserve(struct dpu_rm *rm,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b8610aa806ea..da53ca88251e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -17,7 +17,6 @@
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
- int id;
int ovlp;
enum mdp4_dma dma;
bool enabled;
@@ -120,7 +119,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_unpin_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->vm);
drm_gem_object_put(val);
}
@@ -369,7 +368,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->vm, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -427,7 +426,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm, &iova);
if (ret)
goto fail;
} else {
@@ -511,7 +510,7 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_CURSOR) {
update_cursor(crtc);
- drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
}
}
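The cursor teardown above leans on drm_flip_work so the unpin never runs in IRQ context; a short usage sketch of the pattern (old_bo names whatever BO is being replaced, an illustrative placeholder):

/* Queue the outgoing BO from the atomic/IRQ path, then commit the
 * flip-work to the KMS workqueue so msm_gem_unpin_iova() and
 * drm_gem_object_put() run from process context. */
drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);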
@@ -539,7 +538,7 @@ static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
mdp4_crtc->flushed_mask),
msecs_to_jiffies(50));
if (ret <= 0)
- dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);
+ dev_warn(dev->dev, "vblank timeout, crtc=%s\n", mdp4_crtc->base.name);
mdp4_crtc->flushed_mask = 0;
@@ -624,7 +623,7 @@ static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id, int ovlp_id,
+ struct drm_plane *plane, int ovlp_id,
enum mdp4_dma dma_id)
{
struct drm_crtc *crtc = NULL;
@@ -639,8 +638,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
- mdp4_crtc->id = id;
-
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 7e942c1337b3..0952c7f18abd 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -122,15 +122,16 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->vm);
drm_gem_object_put(mdp4_kms->blank_cursor_bo);
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
if (mdp4_kms->rpm_enabled)
@@ -249,9 +250,9 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* DTV can be hooked to DMA_E: */
encoder->possible_crtcs = 1 << 1;
- if (priv->hdmi) {
+ if (priv->kms->hdmi) {
/* Construct bridge/connector for HDMI: */
- ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
@@ -263,7 +264,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* only DSI1 supported for now */
dsi_id = 0;
- if (!priv->dsi[dsi_id])
+ if (!priv->kms->dsi[dsi_id])
break;
encoder = mdp4_dsi_encoder_init(dev);
@@ -277,7 +278,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ ret = msm_dsi_modeset_init(priv->kms->dsi[dsi_id], dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
@@ -296,7 +297,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
struct drm_device *dev = mdp4_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -338,7 +338,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
goto fail;
}
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
+ crtc = mdp4_crtc_init(dev, plane, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
@@ -346,8 +346,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
ret = PTR_ERR(crtc);
goto fail;
}
-
- priv->num_crtcs++;
}
/*
@@ -398,7 +396,7 @@ static int mdp4_kms_init(struct drm_device *dev)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
int ret;
u32 major, minor;
unsigned long max_clk;
@@ -467,19 +465,20 @@ static int mdp4_kms_init(struct drm_device *dev)
} else if (!mmu) {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- aspace = NULL;
+ vm = NULL;
} else {
- aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0x100000000 - 0x1000);
+ vm = msm_gem_vm_create(dev, mmu, "mdp4",
+ 0x1000, 0x100000000 - 0x1000,
+ true);
- if (IS_ERR(aspace)) {
+ if (IS_ERR(vm)) {
if (!IS_ERR(mmu))
mmu->funcs->destroy(mmu);
- ret = PTR_ERR(aspace);
+ ret = PTR_ERR(vm);
goto fail;
}
- kms->aspace = aspace;
+ kms->vm = vm;
}
ret = modeset_init(mdp4_kms);
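msm_gem_vm_create() replaces msm_gem_address_space_create() here; its prototype, as reconstructed from this call site (the trailing bool is taken to request a kernel-managed VA space, which KMS wants; this is an assumption, not quoted from the header):

/* Assumed prototype, pieced together from the mdp4 call above. */
struct drm_gpuvm *msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu,
				    const char *name, u64 va_start, u64 va_size,
				    bool managed);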
@@ -496,7 +495,7 @@ static int mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->vm,
&mdp4_kms->blank_cursor_iova);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index f9d988076337..fb348583dc84 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -185,7 +185,7 @@ void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id, int ovlp_id,
+ struct drm_plane *plane, int ovlp_id,
enum mdp4_dma dma_id);
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 3fefb2088008..098c3b5ff2b2 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -79,30 +79,25 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
if (!new_state->fb)
return 0;
drm_gem_plane_helper_prepare_fb(plane, new_state);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace, false);
+ return msm_framebuffer_prepare(new_state->fb, false);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, kms->aspace, false);
+ msm_framebuffer_cleanup(fb, false);
}
@@ -141,7 +136,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
enum mdp4_pipe pipe = mdp4_plane->pipe;
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -153,13 +147,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 0));
+ msm_framebuffer_iova(fb, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 1));
+ msm_framebuffer_iova(fb, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 2));
+ msm_framebuffer_iova(fb, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 3));
+ msm_framebuffer_iova(fb, 3));
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0f653e62b4a0..4c4900a7beda 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -169,7 +169,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_unpin_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->vm);
drm_gem_object_put(val);
}
@@ -993,7 +993,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm,
&mdp5_crtc->cursor.iova);
if (ret) {
drm_gem_object_put(cursor_bo);
@@ -1196,7 +1196,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
}
if (pending & PENDING_CURSOR)
- drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+ drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->kms->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3fcca7a3d82e..5b6ca8dd929e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -198,11 +198,12 @@ static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_gem_address_space *aspace = kms->aspace;
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
mdp_kms_destroy(&mdp5_kms->base);
@@ -311,7 +312,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
- if (!priv->hdmi)
+ if (!priv->kms->hdmi)
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
@@ -326,7 +327,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
- ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
break;
case INTF_DSI:
{
@@ -334,14 +335,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
- if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ if ((dsi_id >= ARRAY_SIZE(priv->kms->dsi)) || (dsi_id < 0)) {
DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
}
- if (!priv->dsi[dsi_id])
+ if (!priv->kms->dsi[dsi_id])
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
@@ -356,9 +357,10 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ ret = msm_dsi_modeset_init(priv->kms->dsi[dsi_id], dev, encoder);
if (!ret)
- mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+ mdp5_encoder_set_intf_mode(encoder,
+ msm_dsi_is_cmd_mode(priv->kms->dsi[dsi_id]));
break;
}
@@ -374,7 +376,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
@@ -442,7 +443,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
- priv->num_crtcs++;
}
/*
@@ -450,7 +450,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* crtcs for the encoders
*/
drm_for_each_encoder(encoder, dev)
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
return 0;
@@ -500,7 +500,7 @@ static int mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms = priv->kms;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
int i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
@@ -534,13 +534,13 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- aspace = msm_kms_init_aspace(mdp5_kms->dev);
- if (IS_ERR(aspace)) {
- ret = PTR_ERR(aspace);
+ vm = msm_kms_init_vm(mdp5_kms->dev);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
goto fail;
}
- kms->aspace = aspace;
+ kms->vm = vm;
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index bb1601921938..7c790406d533 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -135,8 +135,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct msm_drm_private *priv = plane->dev->dev_private;
- struct msm_kms *kms = priv->kms;
bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
if (!new_state->fb)
@@ -144,14 +142,12 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
drm_gem_plane_helper_prepare_fb(plane, new_state);
- return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
+ return msm_framebuffer_prepare(new_state->fb, needs_dirtyfb);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
@@ -159,7 +155,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
+ msm_framebuffer_cleanup(fb, needed_dirtyfb);
}
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
@@ -467,8 +463,6 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
enum mdp5_pipe pipe,
struct drm_framebuffer *fb)
{
- struct msm_kms *kms = &mdp5_kms->base.base;
-
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -478,13 +472,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 0));
+ msm_framebuffer_iova(fb, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 1));
+ msm_framebuffer_iova(fb, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 2));
+ msm_framebuffer_iova(fb, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, kms->aspace, 3));
+ msm_framebuffer_iova(fb, 3));
}
/* Note: mdp5_plane->pipe_lock must be locked */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 07a2c1e87219..071bcdea80f7 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -127,18 +127,18 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
priv = drm_dev->dev_private;
kms = priv->kms;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (!priv->dp[i])
+ for (i = 0; i < ARRAY_SIZE(kms->dp); i++) {
+ if (!kms->dp[i])
continue;
- msm_dp_snapshot(disp_state, priv->dp[i]);
+ msm_dp_snapshot(disp_state, kms->dp[i]);
}
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (!priv->dsi[i])
+ for (i = 0; i < ARRAY_SIZE(kms->dsi); i++) {
+ if (!kms->dsi[i])
continue;
- msm_dsi_snapshot(disp_state, priv->dsi[i]);
+ msm_dsi_snapshot(disp_state, kms->dsi[i]);
}
if (kms->funcs->snapshot)
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index f8bfb908f9b4..41018e82efa1 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -11,7 +11,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_drm.h"
#include "dp_panel.h"
@@ -22,13 +21,28 @@
struct msm_dp_audio_private {
struct platform_device *pdev;
struct drm_device *drm_dev;
- struct msm_dp_catalog *catalog;
+ void __iomem *link_base;
u32 channels;
struct msm_dp_audio msm_dp_audio;
};
+static inline u32 msm_dp_read_link(struct msm_dp_audio_private *audio, u32 offset)
+{
+ return readl_relaxed(audio->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_audio_private *audio,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, audio->link_base + offset);
+}
+
static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
struct dp_sdp_header sdp_hdr = {
@@ -37,8 +51,12 @@ static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x00,
.HB3 = audio->channels - 1,
};
+ u32 header[2];
- msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_STREAM_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_STREAM_1, header[1]);
}
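Each SDP writer in this file now follows the same two-step shape: pack the four header bytes, then perform two link-register writes. A hedged sketch of the packing msm_dp_utils_pack_sdp_header() is assumed to perform (parity() stands in for the driver's parity helper; the exact bit layout is an assumption):

/* Assumed layout: each header byte HBn is paired with its computed
 * parity byte, two pairs per 32-bit word, so one SDP header always
 * costs exactly two register writes. */
header[0] = sdp_hdr.HB0 | ((u32)parity(sdp_hdr.HB0) << 8) |
	    ((u32)sdp_hdr.HB1 << 16) | ((u32)parity(sdp_hdr.HB1) << 24);
header[1] = sdp_hdr.HB2 | ((u32)parity(sdp_hdr.HB2) << 8) |
	    ((u32)sdp_hdr.HB3 << 16) | ((u32)parity(sdp_hdr.HB3) << 24);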
static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
@@ -49,8 +67,12 @@ static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x17,
.HB3 = 0x0 | (0x11 << 2),
};
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
- msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
@@ -61,8 +83,12 @@ static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x1b,
.HB3 = 0x0 | (0x11 << 2),
};
+ u32 header[2];
- msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
}
static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
@@ -73,8 +99,12 @@ static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x0f,
.HB3 = 0x00,
};
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
- msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
}
static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
@@ -85,13 +115,53 @@ static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
.HB2 = 0x0f,
.HB3 = 0x00,
};
+ u32 header[2];
+ u32 reg;
+
+ /* XXX: is it necessary to preserve this field? */
+ reg = msm_dp_read_link(audio, MMSS_DP_AUDIO_ISRC_1);
+ sdp_hdr.HB3 = FIELD_GET(HEADER_3_MASK, reg);
+
+ msm_dp_utils_pack_sdp_header(&sdp_hdr, header);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ISRC_0, header[0]);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ISRC_1, header[1]);
+}
+
+static void msm_dp_audio_config_sdp(struct msm_dp_audio_private *audio)
+{
+ u32 sdp_cfg, sdp_cfg2;
+
+ sdp_cfg = msm_dp_read_link(audio, MMSS_DP_SDP_CFG);
+ /* AUDIO_TIMESTAMP_SDP_EN */
+ sdp_cfg |= BIT(1);
+ /* AUDIO_STREAM_SDP_EN */
+ sdp_cfg |= BIT(2);
+ /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+ sdp_cfg |= BIT(5);
+ /* AUDIO_ISRC_SDP_EN */
+ sdp_cfg |= BIT(6);
+ /* AUDIO_INFOFRAME_SDP_EN */
+ sdp_cfg |= BIT(20);
+
+ drm_dbg_dp(audio->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
+
+ msm_dp_write_link(audio, MMSS_DP_SDP_CFG, sdp_cfg);
+
+ sdp_cfg2 = msm_dp_read_link(audio, MMSS_DP_SDP_CFG2);
+ /* IFRM_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(0);
+ /* AUDIO_STREAM_HB3_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(1);
+
+ drm_dbg_dp(audio->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
- msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
+ msm_dp_write_link(audio, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
{
- msm_dp_catalog_audio_config_sdp(audio->catalog);
+ msm_dp_audio_config_sdp(audio);
msm_dp_audio_stream_sdp(audio);
msm_dp_audio_timestamp_sdp(audio);
@@ -102,8 +172,7 @@ static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
{
- u32 select = 0;
- struct msm_dp_catalog *catalog = audio->catalog;
+ u32 select, acr_ctrl;
switch (audio->msm_dp_audio.bw_code) {
case DP_LINK_BW_1_62:
@@ -124,13 +193,17 @@ static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
break;
}
- msm_dp_catalog_audio_config_acr(catalog, select);
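+ /* Open-coded ACR config: select picks the audio clock per link rate,
+ * and the enable bits are carried over from the old catalog helper
+ * rather than re-derived here. */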
+ acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+ drm_dbg_dp(audio->drm_dev, "select: %#x, acr_ctrl: %#x\n",
+ select, acr_ctrl);
+
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 safe_to_exit_level = 0;
+ u32 safe_to_exit_level, mainlink_levels;
switch (audio->msm_dp_audio.lane_count) {
case 1:
@@ -150,14 +223,33 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
break;
}
- msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
+ mainlink_levels = msm_dp_read_link(audio, REG_DP_MAINLINK_LEVELS);
+ mainlink_levels &= 0xFE0;
+ mainlink_levels |= safe_to_exit_level;
+
+ drm_dbg_dp(audio->drm_dev,
+ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+ mainlink_levels, safe_to_exit_level);
+
+ msm_dp_write_link(audio, REG_DP_MAINLINK_LEVELS, mainlink_levels);
}
static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
{
- struct msm_dp_catalog *catalog = audio->catalog;
+ u32 audio_ctrl;
+
+ audio_ctrl = msm_dp_read_link(audio, MMSS_DP_AUDIO_CFG);
+
+ if (enable)
+ audio_ctrl |= BIT(0);
+ else
+ audio_ctrl &= ~BIT(0);
+
+ drm_dbg_dp(audio->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
- msm_dp_catalog_audio_enable(catalog, enable);
+ msm_dp_write_link(audio, MMSS_DP_AUDIO_CFG, audio_ctrl);
+ /* order the audio engine enable/disable write before what follows */
+ wmb();
}
static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display)
@@ -173,8 +265,8 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_
return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-int msm_dp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
@@ -216,8 +308,8 @@ end:
return rc;
}
-void msm_dp_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+void msm_dp_audio_shutdown(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct msm_dp_audio_private *audio;
struct msm_dp *msm_dp_display;
@@ -246,13 +338,13 @@ void msm_dp_audio_shutdown(struct drm_connector *connector,
}
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_catalog *catalog)
+ void __iomem *link_base)
{
int rc = 0;
struct msm_dp_audio_private *audio;
struct msm_dp_audio *msm_dp_audio;
- if (!pdev || !catalog) {
+ if (!pdev) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@@ -265,7 +357,7 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
}
audio->pdev = pdev;
- audio->catalog = catalog;
+ audio->link_base = link_base;
msm_dp_audio = &audio->msm_dp_audio;
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 58fc14693e48..ce2342856adb 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -8,9 +8,10 @@
#include <linux/platform_device.h>
-#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
+struct drm_bridge;
+
/**
* struct msm_dp_audio
* @lane_count: number of lanes configured in current session
@@ -27,13 +28,13 @@ struct msm_dp_audio {
* Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @catalog: an instance of msm_dp_catalog module.
+ * @link_base: ioremapped base of the DP link register region.
*
* Returns an error pointer in case of failure, otherwise
* an instance of the newly created msm_dp_audio module.
*/
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_catalog *catalog);
+ void __iomem *link_base);
/**
* msm_dp_audio_put()
@@ -44,12 +45,12 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
*/
void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int msm_dp_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
-void msm_dp_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge);
+void msm_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
#endif /* _DP_AUDIO_H_ */
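The bridge-first argument order above matches the DRM HDMI-audio bridge callbacks, so the two functions can serve as callback implementations directly. A sketch of the assumed wiring on the dp_display side (not shown in this series; the ops-table name is illustrative):

/* Assumed wiring: the audio hooks slot straight into the HDMI-audio
 * members of the bridge ops. */
static const struct drm_bridge_funcs msm_dp_bridge_funcs = {
	/* other bridge callbacks omitted */
	.hdmi_audio_prepare = msm_dp_audio_prepare,
	.hdmi_audio_shutdown = msm_dp_audio_shutdown,
};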
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index bc8d46abfc61..3825a2fb48e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -4,6 +4,7 @@
*/
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <drm/drm_print.h>
@@ -22,7 +23,7 @@ enum msm_dp_aux_err {
struct msm_dp_aux_private {
struct device *dev;
- struct msm_dp_catalog *catalog;
+ void __iomem *aux_base;
struct phy *phy;
@@ -45,6 +46,80 @@ struct msm_dp_aux_private {
struct drm_dp_aux msm_dp_aux;
};
+static inline u32 msm_dp_read_aux(struct msm_dp_aux_private *aux, u32 offset)
+{
+ return readl_relaxed(aux->aux_base + offset);
+}
+
+static inline void msm_dp_write_aux(struct msm_dp_aux_private *aux,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure aux reg writes happens before any other operation,
+ * this function uses writel() instread of writel_relaxed()
+ */
+ writel(data, aux->aux_base + offset);
+}
+
+static void msm_dp_aux_clear_hw_interrupts(struct msm_dp_aux_private *aux)
+{
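+ /* Latch the PHY AUX status with a dummy read, then run what is taken
+ * to be the h/w-recommended clear sequence (0x1f, 0x9f, 0). */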
+ msm_dp_read_aux(aux, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+}
+
+/*
+ * NOTE: resetting the AUX controller also clears any pending HPD-related interrupts
+ */
+static void msm_dp_aux_reset(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+
+ aux_ctrl |= DP_AUX_CTRL_RESET;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ aux_ctrl &= ~DP_AUX_CTRL_RESET;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static void msm_dp_aux_enable(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+
+ msm_dp_write_aux(aux, REG_DP_TIMEOUT_COUNT, 0xffff);
+ msm_dp_write_aux(aux, REG_DP_AUX_LIMITS, 0xffff);
+
+ aux_ctrl |= DP_AUX_CTRL_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static void msm_dp_aux_disable(struct msm_dp_aux_private *aux)
+{
+ u32 aux_ctrl;
+
+ aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
+ aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+static int msm_dp_aux_wait_for_hpd_connect_state(struct msm_dp_aux_private *aux,
+ unsigned long wait_us)
+{
+ u32 state;
+
+ /* poll for hpd connected status every 2ms and timeout after wait_us */
+ return readl_poll_timeout(aux->aux_base +
+ REG_DP_DP_HPD_INT_STATUS,
+ state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
+ min(wait_us, 2000), wait_us);
+}
+
#define MAX_AUX_RETRIES 5
static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
@@ -88,11 +163,11 @@ static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
- msm_dp_catalog_aux_write_data(aux->catalog, reg);
+ msm_dp_write_aux(aux, REG_DP_AUX_DATA, reg);
}
- msm_dp_catalog_aux_clear_trans(aux->catalog, false);
- msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, 0);
+ msm_dp_aux_clear_hw_interrupts(aux);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
@@ -106,7 +181,7 @@ static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
}
reg |= DP_AUX_TRANS_CTRL_GO;
- msm_dp_catalog_aux_write_trans(aux->catalog, reg);
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, reg);
return len;
}
@@ -139,20 +214,22 @@ static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux,
u32 i, actual_i;
u32 len = msg->size;
- msm_dp_catalog_aux_clear_trans(aux->catalog, true);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_TRANS_CTRL);
+ data &= ~DP_AUX_TRANS_CTRL_GO;
+ msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, data);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
- msm_dp_catalog_aux_write_data(aux->catalog, data);
+ msm_dp_write_aux(aux, REG_DP_AUX_DATA, data);
dp = msg->buffer;
/* discard first byte */
- data = msm_dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
for (i = 0; i < len; i++) {
- data = msm_dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
@@ -335,8 +412,8 @@ static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux,
phy_calibrate(aux->phy);
}
/* reset aux if link is in connected state */
- if (msm_dp_catalog_link_is_connected(aux->catalog))
- msm_dp_catalog_aux_reset(aux->catalog);
+ if (msm_dp_aux_is_link_connected(msm_dp_aux))
+ msm_dp_aux_reset(aux);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
@@ -369,9 +446,8 @@ exit:
return ret;
}
-irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr)
{
- u32 isr;
struct msm_dp_aux_private *aux;
if (!msm_dp_aux) {
@@ -381,12 +457,6 @@ irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
- isr = msm_dp_catalog_aux_get_irq(aux->catalog);
-
- /* no interrupts pending, return immediately */
- if (!isr)
- return IRQ_NONE;
-
if (!aux->cmd_busy) {
DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
return IRQ_NONE;
@@ -403,7 +473,7 @@ irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
- msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_aux_clear_hw_interrupts(aux);
} else if (isr & DP_INTR_NACK_DEFER) {
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
} else if (isr & DP_INTR_WRONG_ADDR) {
@@ -444,7 +514,7 @@ void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux)
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
phy_calibrate(aux->phy);
- msm_dp_catalog_aux_reset(aux->catalog);
+ msm_dp_aux_reset(aux);
}
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
@@ -460,7 +530,7 @@ void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
mutex_lock(&aux->mutex);
- msm_dp_catalog_aux_enable(aux->catalog, true);
+ msm_dp_aux_enable(aux);
aux->retry_cnt = 0;
aux->initted = true;
@@ -476,7 +546,7 @@ void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux)
mutex_lock(&aux->mutex);
aux->initted = false;
- msm_dp_catalog_aux_enable(aux->catalog, false);
+ msm_dp_aux_disable(aux);
mutex_unlock(&aux->mutex);
}
@@ -517,23 +587,105 @@ static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux,
if (ret)
return ret;
- ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
+ ret = msm_dp_aux_wait_for_hpd_connect_state(aux, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
}
-struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
+void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ /* Configure REFTIMER and enable it */
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
+ reg |= DP_DP_HPD_REFTIMER_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
+
+ /* Enable HPD */
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
+ reg &= ~DP_DP_HPD_REFTIMER_ENABLE;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
+
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, 0);
+}
+
+void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+ reg |= DP_DP_HPD_INT_MASK;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
+ reg & DP_DP_HPD_INT_MASK);
+}
+
+void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 reg;
+
+ reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+ reg &= ~DP_DP_HPD_INT_MASK;
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
+ reg & DP_DP_HPD_INT_MASK);
+}
+
+u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ int isr, mask;
+
+ isr = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
+ msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_ACK,
+ (isr & DP_DP_HPD_INT_MASK));
+ mask = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
+
+ /*
+ * We only want to return interrupts that are unmasked to the caller.
+ * However, the interrupt status field also contains other
+ * informational bits about the HPD state status, so we only mask
+ * out the part of the register that tells us about which interrupts
+ * are pending.
+ */
+ return isr & (mask | ~DP_DP_HPD_INT_MASK);
+}
+
+u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux)
+{
+ struct msm_dp_aux_private *aux =
+ container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
+ u32 status;
+
+ status = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
+ status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
+ status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
+
+ return status;
+}
+
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
struct phy *phy,
- bool is_edp)
+ bool is_edp,
+ void __iomem *aux_base)
{
struct msm_dp_aux_private *aux;
- if (!catalog) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
if (!aux)
return ERR_PTR(-ENOMEM);
@@ -544,9 +696,9 @@ struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *cat
mutex_init(&aux->mutex);
aux->dev = dev;
- aux->catalog = catalog;
aux->phy = phy;
aux->retry_cnt = 0;
+ aux->aux_base = aux_base;
/*
* Use the drm_dp_aux_init() to use the aux adapter
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 39c5b4c8596a..4be02e8b4d0b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -6,21 +6,28 @@
#ifndef _DP_AUX_H_
#define _DP_AUX_H_
-#include "dp_catalog.h"
#include <drm/display/drm_dp_helper.h>
int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux);
-irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux);
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr);
void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled);
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux);
void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux);
+u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux);
+u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux);
+
struct phy;
-struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
struct phy *phy,
- bool is_edp);
+ bool is_edp,
+ void __iomem *aux_base);
void msm_dp_aux_put(struct drm_dp_aux *aux);
#endif /*__DP_AUX_H_*/
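msm_dp_aux_isr() now receives the interrupt status from its caller rather than reading and acking it through the catalog. A hedged sketch of the dispatch the new prototype implies (msm_dp_ctrl_get_aux_interrupt() is a hypothetical name for wherever the old read/ack logic landed):

/* Sketch of the caller side: read and ack the controller's interrupt
 * status once, then hand the raw bits to the AUX layer; a zero status
 * means nothing pending, so the AUX ISR is skipped entirely. */
u32 isr = msm_dp_ctrl_get_aux_interrupt(dp->ctrl);

if (isr)
	msm_dp_aux_isr(dp->aux, isr);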
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
deleted file mode 100644
index 7b7eadb2f83b..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ /dev/null
@@ -1,1298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/platform_device.h>
-#include <linux/rational.h>
-#include <drm/display/drm_dp_helper.h>
-#include <drm/drm_print.h>
-
-#include "dp_catalog.h"
-#include "dp_reg.h"
-
-#define POLLING_SLEEP_US 1000
-#define POLLING_TIMEOUT_US 10000
-
-#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
-
-#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
-#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
-
-#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
-
-#define DP_INTERRUPT_STATUS1 \
- (DP_INTR_AUX_XFER_DONE| \
- DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
- DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
- DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
- DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
-
-#define DP_INTERRUPT_STATUS1_ACK \
- (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
-#define DP_INTERRUPT_STATUS1_MASK \
- (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
-
-#define DP_INTERRUPT_STATUS2 \
- (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
- DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
-
-#define DP_INTERRUPT_STATUS2_ACK \
- (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
-#define DP_INTERRUPT_STATUS2_MASK \
- (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
-
-#define DP_INTERRUPT_STATUS4 \
- (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
- PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
-
-#define DP_INTERRUPT_MASK4 \
- (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
- PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
-
-#define DP_DEFAULT_AHB_OFFSET 0x0000
-#define DP_DEFAULT_AHB_SIZE 0x0200
-#define DP_DEFAULT_AUX_OFFSET 0x0200
-#define DP_DEFAULT_AUX_SIZE 0x0200
-#define DP_DEFAULT_LINK_OFFSET 0x0400
-#define DP_DEFAULT_LINK_SIZE 0x0C00
-#define DP_DEFAULT_P0_OFFSET 0x1000
-#define DP_DEFAULT_P0_SIZE 0x0400
-
-struct dss_io_region {
- size_t len;
- void __iomem *base;
-};
-
-struct dss_io_data {
- struct dss_io_region ahb;
- struct dss_io_region aux;
- struct dss_io_region link;
- struct dss_io_region p0;
-};
-
-struct msm_dp_catalog_private {
- struct device *dev;
- struct drm_device *drm_dev;
- struct dss_io_data io;
- struct msm_dp_catalog msm_dp_catalog;
-};
-
-void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dss_io_data *dss = &catalog->io;
-
- msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
- msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
- msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
- msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
-}
-
-static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.aux.base + offset);
-}
-
-static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure aux reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.aux.base + offset);
-}
-
-static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.ahb.base + offset);
-}
-
-static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure phy reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.ahb.base + offset);
-}
-
-static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure interface reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.p0.base + offset);
-}
-
-static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog,
- u32 offset)
-{
- /*
- * To make sure interface reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- return readl_relaxed(catalog->io.p0.base + offset);
-}
-
-static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset)
-{
- return readl_relaxed(catalog->io.link.base + offset);
-}
-
-static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog,
- u32 offset, u32 data)
-{
- /*
- * To make sure link reg writes happens before any other operation,
- * this function uses writel() instread of writel_relaxed()
- */
- writel(data, catalog->io.link.base + offset);
-}
-
-/* aux related catalog functions */
-u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_aux(catalog, REG_DP_AUX_DATA);
-}
-
-int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data);
- return 0;
-}
-
-int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
- return 0;
-}
-
-int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read)
-{
- u32 data;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (read) {
- data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
- data &= ~DP_AUX_TRANS_CTRL_GO;
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
- } else {
- msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
- }
- return 0;
-}
-
-int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
- msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
- return 0;
-}
-
-/**
- * msm_dp_catalog_aux_reset() - reset AUX controller
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * return: void
- *
- * This function reset AUX controller
- *
- * NOTE: reset AUX controller will also clear any pending HPD related interrupts
- *
- */
-void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 aux_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
-
- aux_ctrl |= DP_AUX_CTRL_RESET;
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
- usleep_range(1000, 1100); /* h/w recommended delay */
-
- aux_ctrl &= ~DP_AUX_CTRL_RESET;
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
-}
-
-void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
-{
- u32 aux_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
-
- if (enable) {
- msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
- msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
- aux_ctrl |= DP_AUX_CTRL_ENABLE;
- } else {
- aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
- }
-
- msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
-}
-
-int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
- unsigned long wait_us)
-{
- u32 state;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- /* poll for hpd connected status every 2ms and timeout after wait_us */
- return readl_poll_timeout(catalog->io.aux.base +
- REG_DP_DP_HPD_INT_STATUS,
- state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
- min(wait_us, 2000), wait_us);
-}
-
-u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS);
- intr &= ~DP_INTERRUPT_STATUS1_MASK;
- intr_ack = (intr & DP_INTERRUPT_STATUS1)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
- DP_INTERRUPT_STATUS1_MASK);
-
- return intr;
-
-}
-
-/* controller related catalog functions */
-void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
- u32 msm_dp_tu, u32 valid_boundary,
- u32 valid_boundary2)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
- msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu);
- msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
-}
-
-void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state);
-}
-
-void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
-
- msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
-}
-
-void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
- u32 ln_mapping;
-
- ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
- ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
- ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
- ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
-
- msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
- ln_mapping);
-}
-
-void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- u32 val;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- if (enable)
- val |= DP_MAINLINK_CTRL_ENABLE;
- else
- val &= ~DP_MAINLINK_CTRL_ENABLE;
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
-}
-
-void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- u32 mainlink_ctrl;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
- if (enable) {
- /*
- * To make sure link reg writes happens before other operation,
- * msm_dp_write_link() function uses writel()
- */
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
- DP_MAINLINK_CTRL_ENABLE);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-
- mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
- DP_MAINLINK_FB_BOUNDARY_SEL);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
- } else {
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
- }
-}
-
-void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog,
- u32 colorimetry_cfg,
- u32 test_bits_depth)
-{
- u32 misc_val;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- /* clear bpp bits */
- misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
- misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
- misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
- /* Configure clock to synchronous mode */
- misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
-
- drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
-}
-
-void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 mainlink_ctrl, hw_revision;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
-
- hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
- if (hw_revision >= DP_HW_VERSION_1_2)
- mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
- else
- mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
-}
-
-void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog,
- u32 rate, u32 stream_rate_khz,
- bool is_ycbcr_420)
-{
- u32 pixel_m, pixel_n;
- u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
- u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
- u32 const link_rate_hbr2 = 540000;
- u32 const link_rate_hbr3 = 810000;
- unsigned long den, num;
-
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (rate == link_rate_hbr3)
- pixel_div = 6;
- else if (rate == 162000 || rate == 270000)
- pixel_div = 2;
- else if (rate == link_rate_hbr2)
- pixel_div = 4;
- else
- DRM_ERROR("Invalid pixel mux divider\n");
-
- dispcc_input_rate = (rate * 10) / pixel_div;
-
- rational_best_approximation(dispcc_input_rate, stream_rate_khz,
- (unsigned long)(1 << 16) - 1,
- (unsigned long)(1 << 16) - 1, &den, &num);
-
- den = ~(den - num);
- den = den & 0xFFFF;
- pixel_m = num;
- pixel_n = den;
-
- mvid = (pixel_m & 0xFFFF) * 5;
- nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
- if (nvid < nvid_fixed) {
- u32 temp;
-
- temp = (nvid_fixed / nvid) * nvid;
- mvid = (nvid_fixed / nvid) * mvid;
- nvid = temp;
- }
-
- if (is_ycbcr_420)
- mvid /= 2;
-
- if (link_rate_hbr2 == rate)
- nvid *= 2;
-
- if (link_rate_hbr3 == rate)
- nvid *= 3;
-
- drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
- msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
- msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
- msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
-}
-
-int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog,
- u32 state_bit)
-{
- int bit, ret;
- u32 data;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- bit = BIT(state_bit - 1);
- drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
- msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit);
-
- bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
-
- /* Poll for mainlink ready status */
- ret = readx_poll_timeout(readl, catalog->io.link.base +
- REG_DP_MAINLINK_READY,
- data, data & bit,
- POLLING_SLEEP_US, POLLING_TIMEOUT_US);
- if (ret < 0) {
- DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
- return ret;
- }
- return 0;
-}
-
-/**
- * msm_dp_catalog_hw_revision() - retrieve DP hw revision
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * Return: DP controller hw revision
- *
- */
-u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog)
-{
- const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION);
-}
-
-/**
- * msm_dp_catalog_ctrl_reset() - reset DP controller
- *
- * @msm_dp_catalog: DP catalog structure
- *
- * Return: void
- *
- * This function resets the DP controller
- *
- * NOTE: resetting the DP controller will also clear any pending HPD-related interrupts
- *
- */
-void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 sw_reset;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET);
-
- sw_reset |= DP_SW_RESET;
- msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
- usleep_range(1000, 1100); /* h/w recommended delay */
-
- sw_reset &= ~DP_SW_RESET;
- msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
-}
-
-bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog)
-{
- u32 data;
- int ret;
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- /* Poll for mainlink ready status */
- ret = readl_poll_timeout(catalog->io.link.base +
- REG_DP_MAINLINK_READY,
- data, data & DP_MAINLINK_READY_FOR_VIDEO,
- POLLING_SLEEP_US, POLLING_TIMEOUT_US);
- if (ret < 0) {
- DRM_ERROR("mainlink not ready\n");
- return false;
- }
-
- return true;
-}
-
-void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog,
- bool enable)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- if (enable) {
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS,
- DP_INTERRUPT_STATUS1_MASK);
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
- DP_INTERRUPT_STATUS2_MASK);
- } else {
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
- }
-}
-
-void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
- u32 intr_mask, bool en)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
-
- config = (en ? config | intr_mask : config & ~intr_mask);
-
- drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
- intr_mask, config);
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
- config & DP_DP_HPD_INT_MASK);
-}
-
-void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
-
- /* Configure REFTIMER and enable it */
- reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
-
- /* Enable HPD */
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
-}
-
-void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
-
- reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
-
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
-}
-
-static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog)
-{
- /* trigger sdp */
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
-}
-
-void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 config;
-
- /* enable PSR1 function */
- config = msm_dp_read_link(catalog, REG_PSR_CONFIG);
- config |= PSR1_SUPPORTED;
- msm_dp_write_link(catalog, REG_PSR_CONFIG, config);
-
- msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
- msm_dp_catalog_enable_sdp(catalog);
-}
-
-void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 cmd;
-
- cmd = msm_dp_read_link(catalog, REG_PSR_CMD);
-
- cmd &= ~(PSR_ENTER | PSR_EXIT);
-
- if (enter)
- cmd |= PSR_ENTER;
- else
- cmd |= PSR_EXIT;
-
- msm_dp_catalog_enable_sdp(catalog);
- msm_dp_write_link(catalog, REG_PSR_CMD, cmd);
-}
-
-u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 status;
-
- status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
- status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
- status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
-
- return status;
-}
-
-u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- int isr, mask;
-
- isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
- (isr & DP_DP_HPD_INT_MASK));
- mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
-
- /*
- * We only want to return interrupts that are unmasked to the caller.
- * However, the interrupt status field also contains other
- * informational bits about the HPD state status, so we only mask
- * out the part of the register that tells us about which interrupts
- * are pending.
- */
- return isr & (mask | ~DP_DP_HPD_INT_MASK);
-}
-
-u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
- intr_ack = (intr & DP_INTERRUPT_STATUS4)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
-
- return intr;
-}
-
-int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 intr, intr_ack;
-
- intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
- intr &= ~DP_INTERRUPT_STATUS2_MASK;
- intr_ack = (intr & DP_INTERRUPT_STATUS2)
- << DP_INTERRUPT_STATUS_ACK_SHIFT;
- msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
- intr_ack | DP_INTERRUPT_STATUS2_MASK);
-
- return intr;
-}
-
-void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL,
- DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
- usleep_range(1000, 1100); /* h/w recommended delay */
- msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
-}
-
-void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
- u32 pattern)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 value = 0x0;
-
- /* Make sure to clear the current pattern before starting a new one */
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
-
- drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
- switch (pattern) {
- case DP_PHY_TEST_PATTERN_D10_2:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
- break;
- case DP_PHY_TEST_PATTERN_ERROR_COUNT:
- value &= ~(1 << 16);
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- value |= SCRAMBLER_RESET_COUNT_VALUE;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
- DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- break;
- case DP_PHY_TEST_PATTERN_PRBS7:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_PRBS7);
- break;
- case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
- /* 00111110000011111000001111100000 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
- 0x3E0F83E0);
- /* 00001111100000111110000011111000 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
- 0x0F83E0F8);
- /* 1111100000111110 */
- msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
- 0x0000F83E);
- break;
- case DP_PHY_TEST_PATTERN_CP2520:
- value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
-
- value = DP_HBR2_ERM_PATTERN;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- value |= SCRAMBLER_RESET_COUNT_VALUE;
- msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
- value);
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
- DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- value |= DP_MAINLINK_CTRL_ENABLE;
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
- break;
- case DP_PHY_TEST_PATTERN_SEL_MASK:
- msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
- DP_MAINLINK_CTRL_ENABLE);
- msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
- DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
- break;
- default:
- drm_dbg_dp(catalog->drm_dev,
- "No valid test pattern requested: %#x\n", pattern);
- break;
- }
-}
-
-u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY);
-}
-
-/* panel related catalog functions */
-int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 msm_dp_active)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reg;
-
- msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
- msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
- msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
- msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
-
- reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
-
- if (msm_dp_catalog->wide_bus_en)
- reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
- else
- reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
-
- DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg);
-
- msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
- return 0;
-}
-
-static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
-{
- struct msm_dp_catalog_private *catalog;
- u32 header[2];
- u32 val;
- int i;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
-
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
-
- for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
- val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
- (vsc_sdp->db[i + 3] << 24));
- msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
- }
-}
-
-static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 hw_revision;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
- if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) {
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
- }
-}
-
-void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
-{
- struct msm_dp_catalog_private *catalog;
- u32 cfg, cfg2, misc;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- cfg |= GEN0_SDP_EN;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
-
- cfg2 |= GENERIC0_SDPSIZE_VALID;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
-
- msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp);
-
- /* indicates presence of VSC (BIT(6) of MISC1) */
- misc |= DP_MISC1_VSC_SDP;
-
- drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n");
-
- pr_debug("misc settings = 0x%x\n", misc);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
-
- msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
-}
-
-void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 cfg, cfg2, misc;
-
- catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
-
- cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
-
- cfg &= ~GEN0_SDP_EN;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
-
- cfg2 &= ~GENERIC0_SDPSIZE_VALID;
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
-
- /* switch back to MSA */
- misc &= ~DP_MISC1_VSC_SDP;
-
- drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n");
-
- pr_debug("misc settings = 0x%x\n", misc);
- msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
-
- msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
-}
-
-void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
- struct drm_display_mode *drm_mode)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 hsync_period, vsync_period;
- u32 display_v_start, display_v_end;
- u32 hsync_start_x, hsync_end_x;
- u32 v_sync_width;
- u32 hsync_ctl;
- u32 display_hctl;
-
- /* TPG config parameters */
- hsync_period = drm_mode->htotal;
- vsync_period = drm_mode->vtotal;
-
- display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
- hsync_period);
- display_v_end = ((vsync_period - (drm_mode->vsync_start -
- drm_mode->vdisplay))
- * hsync_period) - 1;
-
- display_v_start += drm_mode->htotal - drm_mode->hsync_start;
- display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
-
- hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
- hsync_end_x = hsync_period - (drm_mode->hsync_start -
- drm_mode->hdisplay) - 1;
-
- v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
-
- hsync_ctl = (hsync_period << 16) |
- (drm_mode->hsync_end - drm_mode->hsync_start);
- display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
- msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
- hsync_period);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
- hsync_period);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
- msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
- msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
- msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
-
- msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
- DP_TPG_CHECKERED_RECT_PATTERN);
- msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
- DP_TPG_VIDEO_CONFIG_BPP_8BIT |
- DP_TPG_VIDEO_CONFIG_RGB);
- msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
- DP_BIST_ENABLE_DPBIST_EN);
- msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
- DP_TIMING_ENGINE_EN_EN);
- drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
-}
-
-void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
- msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
- msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
-}
-
-static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
-{
- struct resource *res;
- void __iomem *base;
-
- base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
- if (!IS_ERR(base))
- *len = resource_size(res);
-
- return base;
-}
-
-static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog)
-{
- struct platform_device *pdev = to_platform_device(catalog->dev);
- struct dss_io_data *dss = &catalog->io;
-
- dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len);
- if (IS_ERR(dss->ahb.base))
- return PTR_ERR(dss->ahb.base);
-
- dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len);
- if (IS_ERR(dss->aux.base)) {
- /*
- * The initial binding had a single reg, but in order to
- * support variation in the sub-region sizes this was split.
- * msm_dp_ioremap() will fail with -EINVAL here if only a single
- * reg is specified, so fill in the sub-region offsets and
- * lengths based on this single region.
- */
- if (PTR_ERR(dss->aux.base) == -EINVAL) {
- if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
- DRM_ERROR("legacy memory region not large enough\n");
- return -EINVAL;
- }
-
- dss->ahb.len = DP_DEFAULT_AHB_SIZE;
- dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
- dss->aux.len = DP_DEFAULT_AUX_SIZE;
- dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
- dss->link.len = DP_DEFAULT_LINK_SIZE;
- dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
- dss->p0.len = DP_DEFAULT_P0_SIZE;
- } else {
- DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
- return PTR_ERR(dss->aux.base);
- }
- } else {
- dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len);
- if (IS_ERR(dss->link.base)) {
- DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
- return PTR_ERR(dss->link.base);
- }
-
- dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len);
- if (IS_ERR(dss->p0.base)) {
- DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
- return PTR_ERR(dss->p0.base);
- }
- }
-
- return 0;
-}
-
-struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
-{
- struct msm_dp_catalog_private *catalog;
- int ret;
-
- catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
- if (!catalog)
- return ERR_PTR(-ENOMEM);
-
- catalog->dev = dev;
-
- ret = msm_dp_catalog_get_io(catalog);
- if (ret)
- return ERR_PTR(ret);
-
- return &catalog->msm_dp_catalog;
-}
-
-void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- u32 header[2];
-
- msm_dp_utils_pack_sdp_header(sdp_hdr, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
-}
-
-void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dp_sdp_header tmp = *sdp_hdr;
- u32 header[2];
- u32 reg;
-
- /* XXX: is it necessary to preserve this field? */
- reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
- tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
-
- msm_dp_utils_pack_sdp_header(&tmp, header);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
-}
-
-void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
-{
- struct msm_dp_catalog_private *catalog;
- u32 acr_ctrl;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
-
- drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
- select, acr_ctrl);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
-}
-
-void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
-{
- struct msm_dp_catalog_private *catalog;
- u32 audio_ctrl;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
-
- if (enable)
- audio_ctrl |= BIT(0);
- else
- audio_ctrl &= ~BIT(0);
-
- drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
-
- msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
- /* make sure audio engine is disabled */
- wmb();
-}
-
-void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
- u32 sdp_cfg = 0;
- u32 sdp_cfg2 = 0;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
- /* AUDIO_TIMESTAMP_SDP_EN */
- sdp_cfg |= BIT(1);
- /* AUDIO_STREAM_SDP_EN */
- sdp_cfg |= BIT(2);
- /* AUDIO_COPY_MANAGEMENT_SDP_EN */
- sdp_cfg |= BIT(5);
- /* AUDIO_ISRC_SDP_EN */
- sdp_cfg |= BIT(6);
- /* AUDIO_INFOFRAME_SDP_EN */
- sdp_cfg |= BIT(20);
-
- drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
-
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
-
- sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- /* IFRM_REGSRC -> Do not use reg values */
- sdp_cfg2 &= ~BIT(0);
- /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
- sdp_cfg2 &= ~BIT(1);
-
- drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
-
- msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
-}
-
-void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
-{
- struct msm_dp_catalog_private *catalog;
- u32 mainlink_levels;
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
- mainlink_levels &= 0xFE0;
- mainlink_levels |= safe_to_exit_level;
-
- drm_dbg_dp(catalog->drm_dev,
- "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
- mainlink_levels, safe_to_exit_level);
-
- msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
deleted file mode 100644
index 6678b0ac9a67..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_CATALOG_H_
-#define _DP_CATALOG_H_
-
-#include <drm/drm_modes.h>
-
-#include "dp_utils.h"
-#include "disp/msm_disp_snapshot.h"
-
-/* interrupts */
-#define DP_INTR_HPD BIT(0)
-#define DP_INTR_AUX_XFER_DONE BIT(3)
-#define DP_INTR_WRONG_ADDR BIT(6)
-#define DP_INTR_TIMEOUT BIT(9)
-#define DP_INTR_NACK_DEFER BIT(12)
-#define DP_INTR_WRONG_DATA_CNT BIT(15)
-#define DP_INTR_I2C_NACK BIT(18)
-#define DP_INTR_I2C_DEFER BIT(21)
-#define DP_INTR_PLL_UNLOCKED BIT(24)
-#define DP_INTR_AUX_ERROR BIT(27)
-
-#define DP_INTR_READY_FOR_VIDEO BIT(0)
-#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
-#define DP_INTR_FRAME_END BIT(6)
-#define DP_INTR_CRC_UPDATED BIT(9)
-
-#define DP_HW_VERSION_1_0 0x10000000
-#define DP_HW_VERSION_1_2 0x10020000
-
-struct msm_dp_catalog {
- bool wide_bus_en;
-};
-
-/* Debug module */
-void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state);
-
-/* AUX APIs */
-u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog);
-int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data);
-int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data);
-int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read);
-int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
- unsigned long wait_us);
-u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog);
-
-/* DP Controller APIs */
-void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state);
-void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config);
-void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb);
-void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate,
- u32 stream_rate_khz, bool is_ycbcr_420);
-int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern);
-u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog);
-bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable);
-void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
- u32 intr_mask, bool en);
-void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter);
-u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog);
-u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog);
-int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog);
-u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
- u32 msm_dp_tu, u32 valid_boundary,
- u32 valid_boundary2);
-void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
- u32 pattern);
-u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog);
-
-/* DP Panel APIs */
-int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 msm_dp_active);
-void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
-void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
- struct drm_display_mode *drm_mode);
-void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
-
-struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
-
-/* DP Audio APIs */
-void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
- struct dp_sdp_header *sdp_hdr);
-void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
-void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
-void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
-void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
-
-#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index a50bfafbb4ea..c42fd2c17a32 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -6,14 +6,18 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/types.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/rational.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
@@ -21,11 +25,46 @@
#include "dp_ctrl.h"
#include "dp_link.h"
+#define POLLING_SLEEP_US 1000
+#define POLLING_TIMEOUT_US 10000
+
#define DP_KHZ_TO_HZ 1000
#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */
#define PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES (300 * HZ / 1000) /* 300 ms */
#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define DP_INTERRUPT_STATUS1 \
+ (DP_INTR_AUX_XFER_DONE | \
+ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+ DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+ DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+ DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+ (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+ DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS4 \
+ (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
+ PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
+
+#define DP_INTERRUPT_MASK4 \
+ (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
+ PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
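
/*
 * Layout note: each interrupt source in these status registers is
 * defined three bits apart (BIT(0), BIT(3), BIT(6), ...): the raw
 * status sits in bit n, a write-1-to-ack bit at n + 1 and a
 * mask/enable bit at n + 2. Shifting the whole DP_INTERRUPT_STATUS*
 * set by the ACK or MASK shift therefore yields the matching ack and
 * mask words in a single operation.
 */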
+
#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
@@ -77,7 +116,8 @@ struct msm_dp_ctrl_private {
struct drm_dp_aux *aux;
struct msm_dp_panel *panel;
struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
+ void __iomem *ahb_base;
+ void __iomem *link_base;
struct phy *phy;
@@ -95,11 +135,43 @@ struct msm_dp_ctrl_private {
struct completion psr_op_comp;
struct completion video_comp;
+ u32 hw_revision;
+
bool core_clks_on;
bool link_clks_on;
bool stream_clks_on;
};
+static inline u32 msm_dp_read_ahb(const struct msm_dp_ctrl_private *ctrl, u32 offset)
+{
+ return readl_relaxed(ctrl->ahb_base + offset);
+}
+
+static inline void msm_dp_write_ahb(struct msm_dp_ctrl_private *ctrl,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure phy reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, ctrl->ahb_base + offset);
+}
+
+static inline u32 msm_dp_read_link(struct msm_dp_ctrl_private *ctrl, u32 offset)
+{
+ return readl_relaxed(ctrl->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_ctrl_private *ctrl,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, ctrl->link_base + offset);
+}
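
/*
 * The helpers above follow the usual MMIO split: readl_relaxed() for
 * plain register reads, writel() (which carries a write barrier) when
 * the access must be observed after everything issued before it. A
 * minimal, hypothetical sketch of where the ordered write matters; the
 * register names below are made up for illustration only:
 *
 *	writel_relaxed(cfg0, base + REG_CFG0);	// plain setup writes
 *	writel_relaxed(cfg1, base + REG_CFG1);
 *	writel(CTRL_GO, base + REG_CTRL);	// ordered "kick": the barrier
 *						// makes the setup visible first
 */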
+
static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
struct msm_dp_link_info *link)
{
@@ -119,6 +191,179 @@ static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
return 0;
}
+/*
+ * NOTE: resetting the DP controller will also clear any pending HPD-related interrupts
+ */
+void msm_dp_ctrl_reset(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+ u32 sw_reset;
+
+ sw_reset = msm_dp_read_ahb(ctrl, REG_DP_SW_RESET);
+
+ sw_reset |= DP_SW_RESET;
+ msm_dp_write_ahb(ctrl, REG_DP_SW_RESET, sw_reset);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ sw_reset &= ~DP_SW_RESET;
+ msm_dp_write_ahb(ctrl, REG_DP_SW_RESET, sw_reset);
+
+ if (!ctrl->hw_revision) {
+ ctrl->hw_revision = msm_dp_read_ahb(ctrl, REG_DP_HW_VERSION);
+ ctrl->panel->hw_revision = ctrl->hw_revision;
+ }
+}
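
/*
 * The cached value comes from REG_DP_HW_VERSION. Judging by the
 * constants in the removed dp_catalog.h (0x10000000 for v1.0,
 * 0x10020000 for v1.2), the register appears to encode major[31:28]
 * and minor[27:16], which is what makes the plain >= comparisons used
 * for feature gating work. Treat that field layout as an inference
 * from those two values rather than a documented fact.
 */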
+
+static u32 msm_dp_ctrl_get_aux_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS);
+ intr &= ~DP_INTERRUPT_STATUS1_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS1)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS,
+ intr_ack | DP_INTERRUPT_STATUS1_MASK);
+
+ return intr;
+}
+
+static u32 msm_dp_ctrl_get_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS2);
+ intr &= ~DP_INTERRUPT_STATUS2_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS2)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2,
+ intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+ return intr;
+}
+
+void msm_dp_ctrl_enable_irq(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS,
+ DP_INTERRUPT_STATUS1_MASK);
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2,
+ DP_INTERRUPT_STATUS2_MASK);
+}
+
+void msm_dp_ctrl_disable_irq(struct msm_dp_ctrl *msm_dp_ctrl)
+{
+ struct msm_dp_ctrl_private *ctrl =
+ container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS, 0x00);
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS2, 0x00);
+}
+
+static u32 msm_dp_ctrl_get_psr_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 intr, intr_ack;
+
+ intr = msm_dp_read_ahb(ctrl, REG_DP_INTR_STATUS4);
+ intr_ack = (intr & DP_INTERRUPT_STATUS4)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_STATUS4, intr_ack);
+
+ return intr;
+}
+
+static void msm_dp_ctrl_config_psr_interrupt(struct msm_dp_ctrl_private *ctrl)
+{
+ msm_dp_write_ahb(ctrl, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
+}
+
+static void msm_dp_ctrl_psr_mainlink_enable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 val;
+
+ val = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ val |= DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, val);
+}
+
+static void msm_dp_ctrl_psr_mainlink_disable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 val;
+
+ val = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ val &= ~DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, val);
+}
+
+static void msm_dp_ctrl_mainlink_enable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ drm_dbg_dp(ctrl->drm_dev, "enable\n");
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+
+ mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+ DP_MAINLINK_CTRL_ENABLE);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+ DP_MAINLINK_FB_BOUNDARY_SEL);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static void msm_dp_ctrl_mainlink_disable(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ drm_dbg_dp(ctrl->drm_dev, "disable\n");
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static void msm_dp_setup_peripheral_flush(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 mainlink_ctrl;
+
+ mainlink_ctrl = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+
+ if (ctrl->hw_revision >= DP_HW_VERSION_1_2)
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
+ else
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
+
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
+static bool msm_dp_ctrl_mainlink_ready(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 data;
+ int ret;
+
+ /* Poll for mainlink ready status */
+ ret = readl_poll_timeout(ctrl->link_base + REG_DP_MAINLINK_READY,
+ data, data & DP_MAINLINK_READY_FOR_VIDEO,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("mainlink not ready\n");
+ return false;
+ }
+
+ return true;
+}
+
void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
{
struct msm_dp_ctrl_private *ctrl;
@@ -126,7 +371,7 @@ void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
reinit_completion(&ctrl->idle_comp);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_PUSH_IDLE);
if (!wait_for_completion_timeout(&ctrl->idle_comp,
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
@@ -171,23 +416,50 @@ static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl)
if (ctrl->panel->psr_cap.version)
config |= DP_CONFIGURATION_CTRL_SEND_VSC;
- msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+ drm_dbg_dp(ctrl->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", config);
+
+ msm_dp_write_link(ctrl, REG_DP_CONFIGURATION_CTRL, config);
+}
+
+static void msm_dp_ctrl_lane_mapping(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+ u32 ln_mapping;
+
+ ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+ ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+ ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+ ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+ msm_dp_write_link(ctrl, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ ln_mapping);
}
static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl)
{
- u32 cc, tb;
+ u32 colorimetry_cfg, test_bits_depth, misc_val;
- msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
+ msm_dp_ctrl_lane_mapping(ctrl);
+ msm_dp_setup_peripheral_flush(ctrl);
msm_dp_ctrl_config_ctrl(ctrl);
- tb = msm_dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->msm_dp_mode.bpp);
- cc = msm_dp_link_get_colorimetry_config(ctrl->link);
- msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
- msm_dp_panel_timing_cfg(ctrl->panel);
+ test_bits_depth = msm_dp_link_get_test_bits_depth(ctrl->link, ctrl->panel->msm_dp_mode.bpp);
+ colorimetry_cfg = msm_dp_link_get_colorimetry_config(ctrl->link);
+
+ misc_val = msm_dp_read_link(ctrl, REG_DP_MISC1_MISC0);
+
+ /* clear bpp bits */
+ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+ misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+ misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+ /* Configure clock to synchronous mode */
+ misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+ drm_dbg_dp(ctrl->drm_dev, "misc settings = 0x%x\n", misc_val);
+ msm_dp_write_link(ctrl, REG_DP_MISC1_MISC0, misc_val);
+
+ msm_dp_panel_timing_cfg(ctrl->panel, ctrl->msm_dp_ctrl.wide_bus_en);
}
/*
@@ -1003,8 +1275,9 @@ static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl)
pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
msm_dp_tu, valid_boundary, valid_boundary2);
- msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
- msm_dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_write_link(ctrl, REG_DP_VALID_BOUNDARY, valid_boundary);
+ msm_dp_write_link(ctrl, REG_DP_TU, msm_dp_tu);
+ msm_dp_write_link(ctrl, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
}
static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl)
@@ -1113,6 +1386,30 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
return ret == 1;
}
+static int msm_dp_ctrl_set_pattern_state_bit(struct msm_dp_ctrl_private *ctrl,
+ u32 state_bit)
+{
+ int bit, ret;
+ u32 data;
+
+ bit = BIT(state_bit - 1);
+ drm_dbg_dp(ctrl->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, bit);
+
+ bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+ /* Poll for mainlink ready status */
+ ret = readx_poll_timeout(readl, ctrl->link_base + REG_DP_MAINLINK_READY,
+ data, data & bit,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
+ return ret;
+ }
+
+ return 0;
+}
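
/*
 * readx_poll_timeout() from <linux/iopoll.h> is the generic form of
 * the readl_poll_timeout() used elsewhere in this file: its first
 * argument is whatever read op should be applied to the second. A
 * sketch of the same poll with a hypothetical masking accessor (the
 * helper name and mask are illustrative only):
 *
 *	static u32 mainlink_ready_bits(void __iomem *reg)
 *	{
 *		return readl(reg) & 0xff;	// imagined status field
 *	}
 *
 *	ret = readx_poll_timeout(mainlink_ready_bits,
 *				 ctrl->link_base + REG_DP_MAINLINK_READY,
 *				 data, data & bit,
 *				 POLLING_SLEEP_US, POLLING_TIMEOUT_US);
 */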
+
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
int *training_step, enum drm_dp_phy dp_phy)
{
@@ -1124,11 +1421,11 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux,
ctrl->panel->dpcd, dp_phy, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
*training_step = DP_TRAINING_1;
- ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
+ ret = msm_dp_ctrl_set_pattern_state_bit(ctrl, 1);
if (ret)
return ret;
msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
@@ -1242,7 +1539,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
ctrl->panel->dpcd, dp_phy, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
*training_step = DP_TRAINING_2;
@@ -1257,7 +1554,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
state_ctrl_bit = 2;
}
- ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
+ ret = msm_dp_ctrl_set_pattern_state_bit(ctrl, state_ctrl_bit);
if (ret)
return ret;
@@ -1359,7 +1656,7 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
}
end:
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
return ret;
}
@@ -1369,7 +1666,7 @@ static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl,
{
int ret = 0;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ msm_dp_ctrl_mainlink_enable(ctrl);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return ret;
@@ -1502,33 +1799,55 @@ static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
return ret;
}
-void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable)
+static void msm_dp_ctrl_enable_sdp(struct msm_dp_ctrl_private *ctrl)
{
- struct msm_dp_ctrl_private *ctrl;
+ /* trigger sdp */
+ msm_dp_write_link(ctrl, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(ctrl, MMSS_DP_SDP_CFG3, 0x0);
+}
- ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
+static void msm_dp_ctrl_psr_enter(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 cmd;
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ cmd = msm_dp_read_link(ctrl, REG_PSR_CMD);
- /*
- * all dp controller programmable registers will not
- * be reset to default value after DP_SW_RESET
- * therefore interrupt mask bits have to be updated
- * to enable/disable interrupts
- */
- msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ cmd &= ~(PSR_ENTER | PSR_EXIT);
+ cmd |= PSR_ENTER;
+
+ msm_dp_ctrl_enable_sdp(ctrl);
+ msm_dp_write_link(ctrl, REG_PSR_CMD, cmd);
+}
+
+static void msm_dp_ctrl_psr_exit(struct msm_dp_ctrl_private *ctrl)
+{
+ u32 cmd;
+
+ cmd = msm_dp_read_link(ctrl, REG_PSR_CMD);
+
+ cmd &= ~(PSR_ENTER | PSR_EXIT);
+ cmd |= PSR_EXIT;
+
+ msm_dp_ctrl_enable_sdp(ctrl);
+ msm_dp_write_link(ctrl, REG_PSR_CMD, cmd);
}
void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl)
{
- u8 cfg;
struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
struct msm_dp_ctrl_private, msm_dp_ctrl);
+ u32 cfg;
if (!ctrl->panel->psr_cap.version)
return;
- msm_dp_catalog_ctrl_config_psr(ctrl->catalog);
+ /* enable PSR1 function */
+ cfg = msm_dp_read_link(ctrl, REG_PSR_CONFIG);
+ cfg |= PSR1_SUPPORTED;
+ msm_dp_write_link(ctrl, REG_PSR_CONFIG, cfg);
+
+ msm_dp_ctrl_config_psr_interrupt(ctrl);
+ msm_dp_ctrl_enable_sdp(ctrl);
cfg = DP_PSR_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1);
@@ -1554,29 +1873,37 @@ void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter)
*/
if (enter) {
reinit_completion(&ctrl->psr_op_comp);
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true);
+ msm_dp_ctrl_psr_enter(ctrl);
if (!wait_for_completion_timeout(&ctrl->psr_op_comp,
PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) {
DRM_ERROR("PSR_ENTRY timedout\n");
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_ctrl_psr_exit(ctrl);
return;
}
msm_dp_ctrl_push_idle(msm_dp_ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
- msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
+ msm_dp_ctrl_psr_mainlink_disable(ctrl);
} else {
- msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
+ msm_dp_ctrl_psr_mainlink_enable(ctrl);
- msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_ctrl_psr_exit(ctrl);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
msm_dp_ctrl_wait4video_ready(ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0);
}
}
+static void msm_dp_ctrl_phy_reset(struct msm_dp_ctrl_private *ctrl)
+{
+ msm_dp_write_ahb(ctrl, REG_DP_PHY_CTRL,
+ DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+ msm_dp_write_ahb(ctrl, REG_DP_PHY_CTRL, 0x0);
+}
+
void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
struct msm_dp_ctrl_private *ctrl;
@@ -1585,7 +1912,7 @@ void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_ctrl_phy_reset(ctrl);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
@@ -1600,7 +1927,7 @@ void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_ctrl_phy_reset(ctrl);
phy_exit(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
@@ -1611,7 +1938,7 @@ static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
struct phy *phy = ctrl->phy;
int ret = 0;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
phy_configure(phy, &ctrl->phy_opts);
/*
@@ -1642,9 +1969,9 @@ static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
phy = ctrl->phy;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_ctrl_reset(&ctrl->msm_dp_ctrl);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
@@ -1676,13 +2003,96 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
ret = msm_dp_ctrl_wait4video_ready(ctrl);
end:
return ret;
}
+#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
+
+static void msm_dp_ctrl_send_phy_pattern(struct msm_dp_ctrl_private *ctrl,
+ u32 pattern)
+{
+ u32 value = 0x0;
+
+ /* Make sure to clear the current pattern before starting a new one */
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, 0x0);
+
+ drm_dbg_dp(ctrl->drm_dev, "pattern: %#x\n", pattern);
+ switch (pattern) {
+ case DP_PHY_TEST_PATTERN_D10_2:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+ break;
+
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ value &= ~(1 << 16);
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ break;
+
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_PRBS7);
+ break;
+
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+ /* 00111110000011111000001111100000 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ 0x3E0F83E0);
+ /* 00001111100000111110000011111000 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ 0x0F83E0F8);
+ /* 1111100000111110 */
+ msm_dp_write_link(ctrl, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ 0x0000F83E);
+ break;
+
+ case DP_PHY_TEST_PATTERN_CP2520:
+ value = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, value);
+
+ value = DP_HBR2_ERM_PATTERN;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ msm_dp_write_link(ctrl, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ value = msm_dp_read_link(ctrl, REG_DP_MAINLINK_CTRL);
+ value |= DP_MAINLINK_CTRL_ENABLE;
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL, value);
+ break;
+
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ msm_dp_write_link(ctrl, REG_DP_MAINLINK_CTRL,
+ DP_MAINLINK_CTRL_ENABLE);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+ break;
+
+ default:
+ drm_dbg_dp(ctrl->drm_dev,
+ "No valid test pattern requested: %#x\n", pattern);
+ break;
+ }
+}
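
/*
 * The three TEST_80BIT_CUSTOM_PATTERN writes above program one 80-bit
 * compliance pattern, presumably with REG0 = bits [31:0], REG1 = bits
 * [63:32], REG2 = bits [79:64], transmitted LSB first. A stand-alone
 * user-space check (not driver code) that the binary comments describe
 * the expected runs of five ones and five zeros:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		const uint32_t reg[3] = { 0x3E0F83E0, 0x0F83E0F8, 0x0000F83E };
 *		int bit;
 *
 *		for (bit = 0; bit < 80; bit++)	// LSB of REG0 first
 *			putchar((reg[bit / 32] >> (bit % 32)) & 1 ? '1' : '0');
 *		putchar('\n');	// prints alternating 00000/11111 runs
 *		return 0;
 *	}
 */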
+
static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
{
bool success = false;
@@ -1697,11 +2107,11 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
DRM_ERROR("Failed to set v/p levels\n");
return false;
}
- msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+ msm_dp_ctrl_send_phy_pattern(ctrl, pattern_requested);
msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX);
msm_dp_link_send_test_response(ctrl->link);
- pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+ pattern_sent = msm_dp_read_link(ctrl, REG_DP_MAINLINK_READY);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
@@ -1898,7 +2308,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_aux_is_link_connected(ctrl->aux))
break;
drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
@@ -1923,7 +2333,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed */
- if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_aux_is_link_connected(ctrl->aux))
break;
drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
@@ -1980,6 +2390,62 @@ static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl)
return msm_dp_ctrl_setup_main_link(ctrl, &training_step);
}
+static void msm_dp_ctrl_config_msa(struct msm_dp_ctrl_private *ctrl,
+ u32 rate, u32 stream_rate_khz,
+ bool is_ycbcr_420)
+{
+ u32 pixel_m, pixel_n;
+ u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+ u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+ u32 const link_rate_hbr2 = 540000;
+ u32 const link_rate_hbr3 = 810000;
+ unsigned long den, num;
+
+ if (rate == link_rate_hbr3)
+ pixel_div = 6;
+ else if (rate == 162000 || rate == 270000)
+ pixel_div = 2;
+ else if (rate == link_rate_hbr2)
+ pixel_div = 4;
+ else
+ DRM_ERROR("Invalid pixel mux divider\n");
+
+ dispcc_input_rate = (rate * 10) / pixel_div;
+
+ rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+ (unsigned long)(1 << 16) - 1,
+ (unsigned long)(1 << 16) - 1, &den, &num);
+
+ den = ~(den - num);
+ den = den & 0xFFFF;
+ pixel_m = num;
+ pixel_n = den;
+
+ mvid = (pixel_m & 0xFFFF) * 5;
+ nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+ if (nvid < nvid_fixed) {
+ u32 temp;
+
+ temp = (nvid_fixed / nvid) * nvid;
+ mvid = (nvid_fixed / nvid) * mvid;
+ nvid = temp;
+ }
+
+ if (is_ycbcr_420)
+ mvid /= 2;
+
+ if (link_rate_hbr2 == rate)
+ nvid *= 2;
+
+ if (link_rate_hbr3 == rate)
+ nvid *= 3;
+
+ drm_dbg_dp(ctrl->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ msm_dp_write_link(ctrl, REG_DP_SOFTWARE_MVID, mvid);
+ msm_dp_write_link(ctrl, REG_DP_SOFTWARE_NVID, nvid);
+}
+
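For a concrete feel of the M/N arithmetic above, here is a standalone sketch (not kernel code) that reproduces the register values for an HBR2 link (540000 kHz) carrying a 148.5 MHz stream. rational_best_approximation() is replaced by an exact gcd reduction, which is equivalent in this case because 1350000/148500 reduces to 100/11, well inside the 16-bit limits:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long rate = 540000, stream_rate_khz = 148500;
	unsigned long dispcc_input_rate = (rate * 10) / 4;	/* pixel_div = 4 for HBR2 */
	unsigned long g = gcd(dispcc_input_rate, stream_rate_khz);
	unsigned long den = dispcc_input_rate / g;		/* 100 */
	unsigned long num = stream_rate_khz / g;		/* 11 */
	unsigned long pixel_m, pixel_n, mvid, nvid;

	den = ~(den - num) & 0xFFFF;				/* 0xFFA6 */
	pixel_m = num;
	pixel_n = den;

	mvid = (pixel_m & 0xFFFF) * 5;				/* 55 */
	nvid = (0xFFFF & ~pixel_n) + (pixel_m & 0xFFFF);	/* 89 + 11 = 100 */

	if (nvid < 0x8000) {			/* DP_LINK_CONSTANT_N_VALUE */
		mvid = (0x8000 / nvid) * mvid;			/* 327 * 55 = 17985 */
		nvid = (0x8000 / nvid) * nvid;			/* 327 * 100 = 32700 */
	}
	nvid *= 2;						/* HBR2 doubles NVID */

	/* 17985 / 65400 = 0.275 = 148.5 MHz / 540 MHz, as expected */
	printf("mvid=%#lx nvid=%#lx\n", mvid, nvid);
	return 0;
}

The final MVID/NVID ratio equals the stream clock divided by the link symbol clock, which is exactly what the MSA M/N values convey to the sink.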
int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train)
{
int ret = 0;
@@ -2045,20 +2511,22 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_configure_source_params(ctrl);
- msm_dp_catalog_ctrl_config_msa(ctrl->catalog,
+ msm_dp_ctrl_config_msa(ctrl,
ctrl->link->link_params.rate,
pixel_rate_orig,
ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420);
+ msm_dp_panel_clear_dsc_dto(ctrl->panel);
+
msm_dp_ctrl_setup_tr_unit(ctrl);
- msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_write_link(ctrl, REG_DP_STATE_CTRL, DP_STATE_CTRL_SEND_VIDEO);
ret = msm_dp_ctrl_wait4video_ready(ctrl);
if (ret)
return ret;
- mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ mainlink_ready = msm_dp_ctrl_mainlink_ready(ctrl);
drm_dbg_dp(ctrl->drm_dev,
"mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
@@ -2074,12 +2542,12 @@ void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_panel_disable_vsc_sdp(ctrl->panel);
/* set dongle to D3 (power off) mode */
msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2107,7 +2575,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
@@ -2129,11 +2597,11 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_panel_disable_vsc_sdp(ctrl->panel);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_ctrl_mainlink_disable(ctrl);
- msm_dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_ctrl_reset(&ctrl->msm_dp_ctrl);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2160,7 +2628,7 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->panel->psr_cap.version) {
- isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
+ isr = msm_dp_ctrl_get_psr_interrupt(ctrl);
if (isr)
complete(&ctrl->psr_op_comp);
@@ -2175,8 +2643,7 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n");
}
- isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog);
-
+ isr = msm_dp_ctrl_get_interrupt(ctrl);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
@@ -2190,6 +2657,11 @@ irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
ret = IRQ_HANDLED;
}
+ /* DP aux isr */
+ isr = msm_dp_ctrl_get_aux_interrupt(ctrl);
+ if (isr)
+ ret |= msm_dp_aux_isr(ctrl->aux, isr);
+
return ret;
}
@@ -2245,14 +2717,14 @@ static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl)
struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
struct msm_dp_panel *panel, struct drm_dp_aux *aux,
- struct msm_dp_catalog *catalog,
- struct phy *phy)
+ struct phy *phy,
+ void __iomem *ahb_base,
+ void __iomem *link_base)
{
struct msm_dp_ctrl_private *ctrl;
int ret;
- if (!dev || !panel || !aux ||
- !link || !catalog) {
+ if (!dev || !panel || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
@@ -2283,9 +2755,10 @@ struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link
ctrl->panel = panel;
ctrl->aux = aux;
ctrl->link = link;
- ctrl->catalog = catalog;
ctrl->dev = dev;
ctrl->phy = phy;
+ ctrl->ahb_base = ahb_base;
+ ctrl->link_base = link_base;
ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl);
if (ret) {
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b7abfedbf574..124b9b21bb7f 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -9,7 +9,6 @@
#include "dp_aux.h"
#include "dp_panel.h"
#include "dp_link.h"
-#include "dp_catalog.h"
struct msm_dp_ctrl {
bool wide_bus_en;
@@ -25,12 +24,15 @@ void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl);
irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl);
-struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
- struct msm_dp_panel *panel, struct drm_dp_aux *aux,
- struct msm_dp_catalog *catalog,
- struct phy *phy);
-
-void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev,
+ struct msm_dp_link *link,
+ struct msm_dp_panel *panel,
+ struct drm_dp_aux *aux,
+ struct phy *phy,
+ void __iomem *ahb_base,
+ void __iomem *link_base);
+
+void msm_dp_ctrl_reset(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
@@ -41,4 +43,7 @@ void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl);
int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl);
void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_enable_irq(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_disable_irq(struct msm_dp_ctrl *msm_dp_ctrl);
+
#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 22fd946ee201..cf3838fcd154 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -5,11 +5,12 @@
#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__
+#ifdef CONFIG_DEBUG_FS
+
#include <linux/debugfs.h>
#include <drm/drm_connector.h>
#include <drm/drm_file.h>
-#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_ctrl.h"
#include "dp_debug.h"
@@ -235,3 +236,5 @@ int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
return 0;
}
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a48e6db4f156..d87d47cc7ec3 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -19,7 +19,6 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_ctrl.h"
-#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_reg.h"
#include "dp_link.h"
@@ -87,7 +86,6 @@ struct msm_dp_display_private {
struct drm_device *drm_dev;
- struct msm_dp_catalog *catalog;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
struct msm_dp_panel *panel;
@@ -112,6 +110,18 @@ struct msm_dp_display_private {
bool wide_bus_supported;
struct msm_dp_audio *audio;
+
+ void __iomem *ahb_base;
+ size_t ahb_len;
+
+ void __iomem *aux_base;
+ size_t aux_len;
+
+ void __iomem *link_base;
+ size_t link_len;
+
+ void __iomem *p0_base;
+ size_t p0_len;
};
struct msm_dp_desc {
@@ -282,9 +292,7 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
struct drm_device *drm = priv->dev;
dp->msm_dp_display.drm_dev = drm;
- priv->dp[dp->id] = &dp->msm_dp_display;
-
-
+ priv->kms->dp[dp->id] = &dp->msm_dp_display;
dp->drm_dev = drm;
dp->aux->drm_dev = drm;
@@ -318,7 +326,7 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
- priv->dp[dp->id] = NULL;
+ priv->kms->dp[dp->id] = NULL;
}
static const struct component_ops msm_dp_display_comp_ops = {
@@ -462,7 +470,8 @@ static void msm_dp_display_host_init(struct msm_dp_display_private *dp)
dp->phy_initialized);
msm_dp_ctrl_core_clk_enable(dp->ctrl);
- msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
+ msm_dp_ctrl_reset(dp->ctrl);
+ msm_dp_ctrl_enable_irq(dp->ctrl);
msm_dp_aux_init(dp->aux);
dp->core_initialized = true;
}
@@ -473,7 +482,8 @@ static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp)
dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
+ msm_dp_ctrl_reset(dp->ctrl);
+ msm_dp_ctrl_disable_irq(dp->ctrl);
msm_dp_aux_deinit(dp->aux);
msm_dp_ctrl_core_clk_disable(dp->ctrl);
dp->core_initialized = false;
@@ -754,21 +764,10 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
if (rc) {
DRM_ERROR("failed to set phy submode, rc = %d\n", rc);
- dp->catalog = NULL;
goto error;
}
- dp->catalog = msm_dp_catalog_get(dev);
- if (IS_ERR(dp->catalog)) {
- rc = PTR_ERR(dp->catalog);
- DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
- dp->catalog = NULL;
- goto error;
- }
-
- dp->aux = msm_dp_aux_get(dev, dp->catalog,
- phy,
- dp->msm_dp_display.is_edp);
+ dp->aux = msm_dp_aux_get(dev, phy, dp->msm_dp_display.is_edp, dp->aux_base);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -784,7 +783,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_link;
}
- dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
+ dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->link_base, dp->p0_base);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -793,8 +792,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
}
dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
- dp->catalog,
- phy);
+ phy, dp->ahb_base, dp->link_base);
if (IS_ERR(dp->ctrl)) {
rc = PTR_ERR(dp->ctrl);
DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
@@ -802,7 +800,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->link_base);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -1025,7 +1023,14 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
return;
}
- msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state);
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->ahb_len,
+ msm_dp_display->ahb_base, "dp_ahb");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->aux_len,
+ msm_dp_display->aux_base, "dp_aux");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->link_len,
+ msm_dp_display->link_base, "dp_link");
+ msm_disp_snapshot_add_block(disp_state, msm_dp_display->p0_len,
+ msm_dp_display->p0_base, "dp_p0");
mutex_unlock(&msm_dp_display->event_mutex);
}
@@ -1148,7 +1153,7 @@ static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog);
+ hpd_isr_status = msm_dp_aux_get_hpd_intr_status(dp->aux);
if (hpd_isr_status & 0x0F) {
drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
@@ -1175,9 +1180,6 @@ static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
/* DP controller isr */
ret |= msm_dp_ctrl_isr(dp->ctrl);
- /* DP aux isr */
- ret |= msm_dp_aux_isr(dp->aux);
-
return ret;
}
@@ -1275,6 +1277,80 @@ static int msm_dp_display_get_connector_type(struct platform_device *pdev,
return connector_type;
}
+static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
+
+ return base;
+}
+
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
+static int msm_dp_display_get_io(struct msm_dp_display_private *display)
+{
+ struct platform_device *pdev = display->msm_dp_display.pdev;
+
+ display->ahb_base = msm_dp_ioremap(pdev, 0, &display->ahb_len);
+ if (IS_ERR(display->ahb_base))
+ return PTR_ERR(display->ahb_base);
+
+ display->aux_base = msm_dp_ioremap(pdev, 1, &display->aux_len);
+ if (IS_ERR(display->aux_base)) {
+ if (display->aux_base != ERR_PTR(-EINVAL)) {
+ DRM_ERROR("unable to remap aux region: %pe\n", display->aux_base);
+ return PTR_ERR(display->aux_base);
+ }
+
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * msm_dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (display->ahb_len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ display->ahb_len = DP_DEFAULT_AHB_SIZE;
+ display->aux_base = display->ahb_base + DP_DEFAULT_AUX_OFFSET;
+ display->aux_len = DP_DEFAULT_AUX_SIZE;
+ display->link_base = display->ahb_base + DP_DEFAULT_LINK_OFFSET;
+ display->link_len = DP_DEFAULT_LINK_SIZE;
+ display->p0_base = display->ahb_base + DP_DEFAULT_P0_OFFSET;
+ display->p0_len = DP_DEFAULT_P0_SIZE;
+
+ return 0;
+ }
+
+ display->link_base = msm_dp_ioremap(pdev, 2, &display->link_len);
+ if (IS_ERR(display->link_base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", display->link_base);
+ return PTR_ERR(display->link_base);
+ }
+
+ display->p0_base = msm_dp_ioremap(pdev, 3, &display->p0_len);
+ if (IS_ERR(display->p0_base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", display->p0_base);
+ return PTR_ERR(display->p0_base);
+ }
+
+ return 0;
+}
+
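To illustrate the fallback, here is a small standalone sketch (not kernel code; the controller base address is hypothetical) that prints the sub-region map carved out of a single legacy reg entry using the default offsets above:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x0ae90000;	/* hypothetical controller base */
	const struct {
		const char *name;
		unsigned long offset, size;
	} map[] = {
		{ "ahb",  0x0000, 0x0200 },
		{ "aux",  0x0200, 0x0200 },
		{ "link", 0x0400, 0x0C00 },
		{ "p0",   0x1000, 0x0400 },
	};

	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("%-4s %#lx..%#lx\n", map[i].name,
		       base + map[i].offset,
		       base + map[i].offset + map[i].size - 1);
	return 0;
}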
static int msm_dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1301,6 +1377,10 @@ static int msm_dp_display_probe(struct platform_device *pdev)
dp->msm_dp_display.is_edp =
(dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
+ rc = msm_dp_display_get_io(dp);
+ if (rc)
+ return rc;
+
rc = msm_dp_init_sub_modules(dp);
if (rc) {
DRM_ERROR("init sub module failed\n");
@@ -1363,7 +1443,7 @@ static int msm_dp_pm_runtime_suspend(struct device *dev)
if (dp->msm_dp_display.is_edp) {
msm_dp_display_host_phy_exit(dp);
- msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
+ msm_dp_aux_hpd_disable(dp->aux);
}
msm_dp_display_host_deinit(dp);
@@ -1384,7 +1464,7 @@ static int msm_dp_pm_runtime_resume(struct device *dev)
*/
msm_dp_display_host_init(dp);
if (dp->msm_dp_display.is_edp) {
- msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_aux_hpd_enable(dp->aux);
msm_dp_display_host_phy_init(dp);
}
@@ -1646,8 +1726,6 @@ void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
/* populate wide_bus_support to different layers */
msm_dp_display->ctrl->wide_bus_en =
msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
- msm_dp_display->catalog->wide_bus_en =
- msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
}
void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
@@ -1671,10 +1749,8 @@ void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
return;
}
- msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
-
- /* enable HDP interrupts */
- msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
+ msm_dp_aux_hpd_enable(dp->aux);
+ msm_dp_aux_hpd_intr_enable(dp->aux);
msm_dp_display->internal_hpd = true;
mutex_unlock(&dp->event_mutex);
@@ -1687,9 +1763,9 @@ void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
mutex_lock(&dp->event_mutex);
- /* disable HDP interrupts */
- msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
- msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
+
+ msm_dp_aux_hpd_intr_disable(dp->aux);
+ msm_dp_aux_hpd_disable(dp->aux);
msm_dp_display->internal_hpd = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index f222d7ccaa88..9a461ab2f32f 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -20,7 +20,8 @@
* @bridge: Pointer to drm bridge structure
* Returns: Bridge's 'is connected' status
*/
-static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+msm_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct msm_dp *dp;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 92a9077959b3..66e1bbd80db3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 4e8ab75c771b..15b7f6c7146e 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -4,6 +4,7 @@
*/
#include "dp_panel.h"
+#include "dp_reg.h"
#include "dp_utils.h"
#include <drm/drm_connector.h>
@@ -11,6 +12,10 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
+#include <linux/io.h>
+
+#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
+
#define DP_MAX_NUM_DP_LANES 4
#define DP_LINK_RATE_HBR2 540000 /* kbytes */
@@ -20,10 +25,46 @@ struct msm_dp_panel_private {
struct msm_dp_panel msm_dp_panel;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
+ void __iomem *link_base;
+ void __iomem *p0_base;
bool panel_on;
};
+static inline u32 msm_dp_read_link(struct msm_dp_panel_private *panel, u32 offset)
+{
+ return readl_relaxed(panel->link_base + offset);
+}
+
+static inline void msm_dp_write_link(struct msm_dp_panel_private *panel,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure link reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, panel->link_base + offset);
+}
+
+static inline void msm_dp_write_p0(struct msm_dp_panel_private *panel,
+ u32 offset, u32 data)
+{
+ /*
+ * To make sure interface reg writes happen before any other operation,
+ * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, panel->p0_base + offset);
+}
+
+static inline u32 msm_dp_read_p0(struct msm_dp_panel_private *panel,
+ u32 offset)
+{
+ /*
+ * Interface reg reads carry no ordering requirement here, so
+ * readl_relaxed() is sufficient.
+ */
+ return readl_relaxed(panel->p0_base + offset);
+}
+
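The ordering comments above hinge on the difference between the two accessors. On arm64 that difference is just an explicit barrier before the store; roughly, simplified from the kernel's arm64 <asm/io.h> and shown here only for illustration:

#define writel_relaxed(v, c)	((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
#define writel(v, c)		({ __iowmb(); writel_relaxed((v), (c)); })

writel() thus orders prior normal-memory writes ahead of the MMIO store, a guarantee that writel_relaxed() omits.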
static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
{
ssize_t rlen;
@@ -172,7 +213,7 @@ int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
if (!msm_dp_panel->drm_edid) {
DRM_ERROR("panel edid read failed\n");
/* check edid read fail is due to unplug */
- if (!msm_dp_catalog_link_is_connected(panel->catalog)) {
+ if (!msm_dp_aux_is_link_connected(panel->aux)) {
rc = -ETIMEDOUT;
goto end;
}
@@ -252,9 +293,85 @@ void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel)
}
}
+static void msm_dp_panel_tpg_enable(struct msm_dp_panel *msm_dp_panel,
+ struct drm_display_mode *drm_mode)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 v_sync_width;
+ u32 hsync_ctl;
+ u32 display_hctl;
+
+ /* TPG config parameters */
+ hsync_period = drm_mode->htotal;
+ vsync_period = drm_mode->vtotal;
+
+ display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+ hsync_period);
+ display_v_end = ((vsync_period - (drm_mode->vsync_start -
+ drm_mode->vdisplay))
+ * hsync_period) - 1;
+
+ display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+ display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+ hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+ hsync_end_x = hsync_period - (drm_mode->hsync_start -
+ drm_mode->hdisplay) - 1;
+
+ v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+ hsync_ctl = (hsync_period << 16) |
+ (drm_mode->hsync_end - drm_mode->hsync_start);
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ msm_dp_write_p0(panel, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ hsync_period);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ hsync_period);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ msm_dp_write_p0(panel, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ msm_dp_write_p0(panel, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ msm_dp_write_p0(panel, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ msm_dp_write_p0(panel, MMSS_DP_TPG_MAIN_CONTROL,
+ DP_TPG_CHECKERED_RECT_PATTERN);
+ msm_dp_write_p0(panel, MMSS_DP_TPG_VIDEO_CONFIG,
+ DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+ DP_TPG_VIDEO_CONFIG_RGB);
+ msm_dp_write_p0(panel, MMSS_DP_BIST_ENABLE,
+ DP_BIST_ENABLE_DPBIST_EN);
+ msm_dp_write_p0(panel, MMSS_DP_TIMING_ENGINE_EN,
+ DP_TIMING_ENGINE_EN_EN);
+ drm_dbg_dp(panel->drm_dev, "%s: enabled tpg\n", __func__);
+}
+
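As a sanity check on the timing arithmetic above, the following standalone sketch (not kernel code; CEA-861 1080p60 timings filled in by hand) evaluates the derived values:

#include <stdio.h>

int main(void)
{
	/* 1920x1080@60: mode timings filled in by hand for illustration */
	unsigned int htotal = 2200, hsync_start = 2008, hsync_end = 2052, hdisplay = 1920;
	unsigned int vtotal = 1125, vsync_start = 1084, vsync_end = 1089, vdisplay = 1080;

	unsigned int hsync_period = htotal;
	unsigned int display_v_start = (vtotal - vsync_start) * hsync_period +
				       (htotal - hsync_start);
	unsigned int display_v_end = (vtotal - (vsync_start - vdisplay)) * hsync_period -
				     1 - (hsync_start - hdisplay);
	unsigned int hsync_start_x = htotal - hsync_start;
	unsigned int hsync_end_x = hsync_period - (hsync_start - hdisplay) - 1;
	unsigned int hsync_ctl = (hsync_period << 16) | (hsync_end - hsync_start);
	unsigned int display_hctl = (hsync_end_x << 16) | hsync_start_x;

	/* prints hsync_ctl=0x898002c display_hctl=0x83f00c0 */
	printf("hsync_ctl=%#x display_hctl=%#x\n", hsync_ctl, display_hctl);
	/* prints v_start=90392 v_end=2466111 vsw=5 */
	printf("v_start=%u v_end=%u vsw=%u\n", display_v_start, display_v_end,
	       vsync_end - vsync_start);
	return 0;
}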
+static void msm_dp_panel_tpg_disable(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+
+ msm_dp_write_p0(panel, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ msm_dp_write_p0(panel, MMSS_DP_BIST_ENABLE, 0x0);
+ msm_dp_write_p0(panel, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
{
- struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
if (!msm_dp_panel) {
@@ -263,7 +380,6 @@ void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
}
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
if (!panel->panel_on) {
drm_dbg_dp(panel->drm_dev,
@@ -272,18 +388,109 @@ void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
}
if (!enable) {
- msm_dp_catalog_panel_tpg_disable(catalog);
+ msm_dp_panel_tpg_disable(msm_dp_panel);
return;
}
- drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
- msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
+ drm_dbg_dp(panel->drm_dev, "calling panel's tpg_enable\n");
+ msm_dp_panel_tpg_enable(msm_dp_panel, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
+}
+
+void msm_dp_panel_clear_dsc_dto(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+
+ msm_dp_write_p0(panel, MMSS_DP_DSC_DTO, 0x0);
+}
+
+static void msm_dp_panel_send_vsc_sdp(struct msm_dp_panel_private *panel, struct dp_sdp *vsc_sdp)
+{
+ u32 header[2];
+ u32 val;
+ int i;
+
+ msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
+
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_0, header[0]);
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_1, header[1]);
+
+ for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
+ val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
+ (vsc_sdp->db[i + 3] << 24));
+ msm_dp_write_link(panel, MMSS_DP_GENERIC0_2 + i, val);
+ }
+}
+
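The loop above packs the VSC SDP data-block bytes little-endian into 32-bit register words, four bytes per write. A minimal sketch of one word, with hypothetical byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t db[4] = { 0x12, 0x34, 0x56, 0x78 };	/* hypothetical SDP bytes */
	uint32_t val = db[0] | (db[1] << 8) | (db[2] << 16) | (db[3] << 24);

	printf("%#x\n", val);	/* 0x78563412: db[0] lands in bits 7:0 */
	return 0;
}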
+static void msm_dp_panel_update_sdp(struct msm_dp_panel_private *panel)
+{
+ u32 hw_revision = panel->msm_dp_panel.hw_revision;
+
+ if (hw_revision >= DP_HW_VERSION_1_0 &&
+ hw_revision < DP_HW_VERSION_1_2) {
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG3, 0x0);
+ }
+}
+
+void msm_dp_panel_enable_vsc_sdp(struct msm_dp_panel *msm_dp_panel, struct dp_sdp *vsc_sdp)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 cfg, cfg2, misc;
+
+ cfg = msm_dp_read_link(panel, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(panel, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(panel, REG_DP_MISC1_MISC0);
+
+ cfg |= GEN0_SDP_EN;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 |= GENERIC0_SDPSIZE_VALID;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG2, cfg2);
+
+ msm_dp_panel_send_vsc_sdp(panel, vsc_sdp);
+
+ /* indicates presence of VSC (BIT(6) of MISC1) */
+ misc |= DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(panel->drm_dev, "vsc sdp enable=1\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ msm_dp_write_link(panel, REG_DP_MISC1_MISC0, misc);
+
+ msm_dp_panel_update_sdp(panel);
+}
+
+void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel)
+{
+ struct msm_dp_panel_private *panel =
+ container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ u32 cfg, cfg2, misc;
+
+ cfg = msm_dp_read_link(panel, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(panel, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(panel, REG_DP_MISC1_MISC0);
+
+ cfg &= ~GEN0_SDP_EN;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 &= ~GENERIC0_SDPSIZE_VALID;
+ msm_dp_write_link(panel, MMSS_DP_SDP_CFG2, cfg2);
+
+ /* switch back to MSA */
+ misc &= ~DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(panel->drm_dev, "vsc sdp enable=0\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ msm_dp_write_link(panel, REG_DP_MISC1_MISC0, misc);
+
+ msm_dp_panel_update_sdp(panel);
}
static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
{
- struct msm_dp_catalog *catalog;
- struct msm_dp_panel_private *panel;
struct msm_dp_display_mode *msm_dp_mode;
struct drm_dp_vsc_sdp vsc_sdp_data;
struct dp_sdp vsc_sdp;
@@ -294,8 +501,6 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return -EINVAL;
}
- panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
msm_dp_mode = &msm_dp_panel->msm_dp_mode;
memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data));
@@ -322,24 +527,23 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return len;
}
- msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
+ msm_dp_panel_enable_vsc_sdp(msm_dp_panel, &vsc_sdp);
return 0;
}
-int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel, bool wide_bus_en)
{
u32 data, total_ver, total_hor;
- struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
struct drm_display_mode *drm_mode;
u32 width_blanking;
u32 sync_start;
u32 msm_dp_active;
u32 total;
+ u32 reg;
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode;
drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
@@ -382,7 +586,20 @@ int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
msm_dp_active = data;
- msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active);
+ msm_dp_write_link(panel, REG_DP_TOTAL_HOR_VER, total);
+ msm_dp_write_link(panel, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
+ msm_dp_write_link(panel, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
+ msm_dp_write_link(panel, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
+
+ reg = msm_dp_read_p0(panel, MMSS_DP_INTF_CONFIG);
+ if (wide_bus_en)
+ reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
+ else
+ reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
+
+ drm_dbg_dp(panel->drm_dev, "wide_bus_en=%d reg=%#x\n", wide_bus_en, reg);
+
+ msm_dp_write_p0(panel, MMSS_DP_INTF_CONFIG, reg);
if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420)
msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel);
@@ -486,13 +703,15 @@ static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
}
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
- struct msm_dp_link *link, struct msm_dp_catalog *catalog)
+ struct msm_dp_link *link,
+ void __iomem *link_base,
+ void __iomem *p0_base)
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
int ret;
- if (!dev || !catalog || !aux || !link) {
+ if (!dev || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
@@ -503,8 +722,9 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux
panel->dev = dev;
panel->aux = aux;
- panel->catalog = catalog;
panel->link = link;
+ panel->link_base = link_base;
+ panel->p0_base = p0_base;
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 4906f4f09f24..d2cf401506dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -6,6 +6,7 @@
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
+#include <drm/drm_modes.h>
#include <drm/msm_drm.h>
#include "dp_aux.h"
@@ -38,6 +39,7 @@ struct msm_dp_panel {
struct msm_dp_panel_psr psr_cap;
bool video_test;
bool vsc_sdp_supported;
+ u32 hw_revision;
u32 max_dp_lanes;
u32 max_dp_link_rate;
@@ -47,7 +49,7 @@ struct msm_dp_panel {
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
-int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel, bool wide_bus_en);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
@@ -57,6 +59,11 @@ int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel);
void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable);
+void msm_dp_panel_clear_dsc_dto(struct msm_dp_panel *msm_dp_panel);
+
+void msm_dp_panel_enable_vsc_sdp(struct msm_dp_panel *msm_dp_panel, struct dp_sdp *vsc_sdp);
+void msm_dp_panel_disable_vsc_sdp(struct msm_dp_panel *msm_dp_panel);
+
/**
* is_link_rate_valid() - validates the link rate
* @lane_rate: link rate requested by the sink
@@ -85,6 +92,8 @@ static inline bool is_lane_count_valid(u32 lane_count)
}
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
- struct msm_dp_link *link, struct msm_dp_catalog *catalog);
+ struct msm_dp_link *link,
+ void __iomem *link_base,
+ void __iomem *p0_base);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 3835c7f5cb98..7c44d4e2cf13 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -11,6 +11,8 @@
/* DP_TX Registers */
#define REG_DP_HW_VERSION (0x00000000)
+#define DP_HW_VERSION_1_0 0x10000000
+#define DP_HW_VERSION_1_2 0x10020000
#define REG_DP_SW_RESET (0x00000010)
#define DP_SW_RESET (0x00000001)
@@ -21,8 +23,25 @@
#define REG_DP_CLK_CTRL (0x00000018)
#define REG_DP_CLK_ACTIVE (0x0000001C)
+
#define REG_DP_INTR_STATUS (0x00000020)
+#define DP_INTR_HPD BIT(0)
+#define DP_INTR_AUX_XFER_DONE BIT(3)
+#define DP_INTR_WRONG_ADDR BIT(6)
+#define DP_INTR_TIMEOUT BIT(9)
+#define DP_INTR_NACK_DEFER BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK BIT(18)
+#define DP_INTR_I2C_DEFER BIT(21)
+#define DP_INTR_PLL_UNLOCKED BIT(24)
+#define DP_INTR_AUX_ERROR BIT(27)
+
#define REG_DP_INTR_STATUS2 (0x00000024)
+#define DP_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
+#define DP_INTR_FRAME_END BIT(6)
+#define DP_INTR_CRC_UPDATED BIT(9)
+
#define REG_DP_INTR_STATUS3 (0x00000028)
#define REG_DP_INTR_STATUS4 (0x0000002C)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 296215877613..d8bb40ef820e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -136,7 +136,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
msm_dsi->next_bridge = ext_bridge;
}
- priv->dsi[msm_dsi->id] = msm_dsi;
+ priv->kms->dsi[msm_dsi->id] = msm_dsi;
return 0;
}
@@ -148,7 +148,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
msm_dsi_tx_buf_free(msm_dsi->host);
- priv->dsi[msm_dsi->id] = NULL;
+ priv->kms->dsi[msm_dsi->id] = NULL;
}
static const struct component_ops dsi_ops = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 87496db203d6..93c028a122f3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -98,6 +98,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host);
int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
@@ -115,6 +116,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host);
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7675558ae2e5..fed8e9b67011 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -273,6 +273,18 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_9_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g_v2_9,
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2_9,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
&apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
@@ -318,6 +330,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0,
&sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_9_0,
+ &sm8650_dsi_cfg, &msm_dsi_6g_v2_9_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 65b0705fac0e..38f303f2ed04 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -31,6 +31,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
+#define MSM_DSI_6G_VER_MINOR_V2_9_0 0x20090000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4d75529c0e85..e0de545d4077 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -119,6 +119,15 @@ struct msm_dsi_host {
struct clk *pixel_clk;
struct clk *byte_intf_clk;
+ /*
+ * Clocks which need to be properly parented between the DISPCC and the
+ * DSI PHY PLL:
+ */
+ struct clk *byte_src_clk;
+ struct clk *pixel_src_clk;
+ struct clk *dsi_pll_byte_clk;
+ struct clk *dsi_pll_pixel_clk;
+
unsigned long byte_clk_rate;
unsigned long byte_intf_clk_rate;
unsigned long pixel_clk_rate;
@@ -143,7 +152,7 @@ struct msm_dsi_host {
/* DSI 6G TX buffer */
struct drm_gem_object *tx_gem_obj;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* DSI v2 TX buffer */
void *tx_buf;
@@ -269,6 +278,38 @@ int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
return ret;
}
+int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret;
+
+ ret = dsi_clk_init_6g_v2(msm_host);
+ if (ret)
+ return ret;
+
+ msm_host->byte_src_clk = devm_clk_get(dev, "byte_src");
+ if (IS_ERR(msm_host->byte_src_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->byte_src_clk),
+ "can't get byte_src clock\n");
+
+ msm_host->dsi_pll_byte_clk = devm_clk_get(dev, "dsi_pll_byte");
+ if (IS_ERR(msm_host->dsi_pll_byte_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_byte_clk),
+ "can't get dsi_pll_byte clock\n");
+
+ msm_host->pixel_src_clk = devm_clk_get(dev, "pixel_src");
+ if (IS_ERR(msm_host->pixel_src_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->pixel_src_clk),
+ "can't get pixel_src clock\n");
+
+ msm_host->dsi_pll_pixel_clk = devm_clk_get(dev, "dsi_pll_pixel");
+ if (IS_ERR(msm_host->dsi_pll_pixel_clk))
+ return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_pixel_clk),
+ "can't get dsi_pll_pixel clock\n");
+
+ return 0;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -370,6 +411,26 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret;
+
+ /*
+ * DSI PHY PLLs have to be enabled to allow reparenting to them, so
+ * assigned-clock-parents cannot be used here.
+ */
+ ret = clk_set_parent(msm_host->byte_src_clk, msm_host->dsi_pll_byte_clk);
+ if (ret)
+ dev_err(dev, "Failed to parent byte_src -> dsi_pll_byte: %d\n", ret);
+
+ ret = clk_set_parent(msm_host->pixel_src_clk, msm_host->dsi_pll_pixel_clk);
+ if (ret)
+ dev_err(dev, "Failed to parent pixel_src -> dsi_pll_pixel: %d\n", ret);
+
+ return dsi_link_clk_set_rate_6g(msm_host);
+}
+
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -1146,10 +1207,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
uint64_t iova;
u8 *data;
- msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
+ msm_host->vm = drm_gpuvm_get(priv->kms->vm);
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
- msm_host->aspace,
+ msm_host->vm,
&msm_host->tx_gem_obj, &iova);
if (IS_ERR(data)) {
@@ -1193,10 +1254,10 @@ void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
return;
if (msm_host->tx_gem_obj) {
- msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
- msm_gem_address_space_put(msm_host->aspace);
+ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->vm);
+ drm_gpuvm_put(msm_host->vm);
msm_host->tx_gem_obj = NULL;
- msm_host->aspace = NULL;
+ msm_host->vm = NULL;
}
if (msm_host->tx_buf)
@@ -1327,7 +1388,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
return -EINVAL;
return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, dma_base);
+ priv->kms->vm, dma_base);
}
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 5973d7325699..221f12db5f8b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -597,6 +597,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_4nm_8550_cfgs },
{ .compatible = "qcom,sm8650-dsi-phy-4nm",
.data = &dsi_phy_4nm_8650_cfgs },
+ { .compatible = "qcom,sm8750-dsi-phy-3nm",
+ .data = &dsi_phy_3nm_8750_cfgs },
#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 7ea608f620fe..c558f8df1684 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -63,6 +63,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index c19890358b74..8c98f91a5930 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -51,6 +51,8 @@
#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3)
/* Hardware is V5.2 */
#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4)
+/* Hardware is V7.0 */
+#define DSI_PHY_7NM_QUIRK_V7_0 BIT(5)
struct dsi_pll_config {
bool enable_ssc;
@@ -129,9 +131,30 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
dec_multiple = div_u64(pll_freq * multiplier, divider);
dec = div_u64_rem(dec_multiple, multiplier, &frac);
- if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) {
config->pll_clock_inverters = 0x28;
- else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (pll_freq < 163000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 175000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 325000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 350000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 650000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 700000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 1300000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq < 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq < 4000000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (pll_freq <= 1300000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@@ -250,7 +273,8 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
vco_config_1 = 0x01;
}
- if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
if (pll->vco_current_rate < 1557000000ULL)
vco_config_1 = 0x08;
else
@@ -620,6 +644,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@@ -629,6 +654,9 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
break;
case MSM_DSI_PHY_MASTER:
pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
+ /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)
+ writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5);
break;
case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */
@@ -907,7 +935,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Request for REFGEN READY */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
udelay(500);
}
@@ -941,7 +970,20 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
lane_ctrl0 = 0x1f;
}
- if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (phy->cphy_mode) {
+ /* TODO: different for second phy */
+ vreg_ctrl_0 = 0x57;
+ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x3d;
+ glbl_rescode_bot_ctrl = 0x38;
+ } else {
+ vreg_ctrl_0 = 0x56;
+ vreg_ctrl_1 = 0x19;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+ }
+ } else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45;
vreg_ctrl_1 = 0x41;
@@ -1003,6 +1045,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* program CMN_CTRL_4 for minor_ver 2 chipsets */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) ||
(readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
@@ -1117,7 +1160,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
/* Turn off REFGEN Vote */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
wmb();
/* Delay to ensure HW removes vote before PHY shut down */
@@ -1384,3 +1428,26 @@ const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V7_0,
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 2fd388b892dc..5afac09c0d33 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
#include <drm/drm_of.h>
#include <drm/display/drm_hdmi_state_helper.h>
+#include "msm_kms.h"
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -244,7 +245,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
err = msm_hdmi_init(hdmi);
if (err)
return err;
- priv->hdmi = hdmi;
+ priv->kms->hdmi = hdmi;
return 0;
}
@@ -254,9 +255,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
{
struct msm_drm_private *priv = dev_get_drvdata(master);
- if (priv->hdmi) {
- msm_hdmi_destroy(priv->hdmi);
- priv->hdmi = NULL;
+ if (priv->kms->hdmi) {
+ msm_hdmi_destroy(priv->kms->hdmi);
+ priv->kms->hdmi = NULL;
}
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d5e572d10d6a..02cfd46df594 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -200,12 +200,12 @@ struct hdmi_codec_daifmt;
struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
-void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge);
+void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/*
* hdmi bridge:
@@ -215,7 +215,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
- struct drm_bridge *bridge);
+ struct drm_bridge *bridge, struct drm_connector *connector);
void msm_hdmi_hpd_enable(struct drm_bridge *bridge);
void msm_hdmi_hpd_disable(struct drm_bridge *bridge);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index b9ec14ef2c20..d9a8dc9dae8f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -122,8 +122,8 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
- struct drm_bridge *bridge,
+int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
@@ -163,8 +163,8 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
- struct drm_bridge *bridge)
+void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 53a7ce8cc7bc..46fd58646d32 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -475,7 +475,7 @@ msm_hdmi_hotplug_work(struct work_struct *work)
container_of(work, struct hdmi_bridge, hpd_work);
struct drm_bridge *bridge = &hdmi_bridge->base;
- drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge, hdmi_bridge->hdmi->connector));
}
/* initialize bridge */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 407e6c449ee0..114b0d507700 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -177,8 +177,8 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
connector_status_disconnected;
}
-enum drm_connector_status msm_hdmi_bridge_detect(
- struct drm_bridge *bridge)
+enum drm_connector_status
+msm_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 6af72162cda4..bbda865addae 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -117,6 +117,36 @@ static const struct file_operations msm_gpu_fops = {
.release = msm_gpu_release,
};
+#ifdef CONFIG_DRM_MSM_KMS
+static int msm_fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (dev->fb_helper && dev->fb_helper->fb) {
+ seq_puts(m, "fbcon ");
+ fbdev_fb = dev->fb_helper->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_puts(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list msm_kms_debugfs_list[] = {
+ { "fb", msm_fb_show },
+};
+
/*
* Display Snapshot:
*/
@@ -180,6 +210,27 @@ static const struct file_operations msm_kms_fops = {
.release = msm_kms_release,
};
+static void msm_debugfs_kms_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", 0400, minor->debugfs_root,
+ dev, &msm_kms_fops);
+
+ if (priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
+}
+#else /* ! CONFIG_DRM_MSM_KMS */
+static void msm_debugfs_kms_init(struct drm_minor *minor)
+{
+}
+#endif
+
/*
* Other debugfs:
*/
@@ -267,47 +318,23 @@ static int msm_mm_show(struct seq_file *m, void *arg)
return 0;
}
-static int msm_fb_show(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_framebuffer *fb, *fbdev_fb = NULL;
-
- if (dev->fb_helper && dev->fb_helper->fb) {
- seq_printf(m, "fbcon ");
- fbdev_fb = dev->fb_helper->fb;
- msm_framebuffer_describe(fbdev_fb, m);
- }
-
- mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user ");
- msm_framebuffer_describe(fb, m);
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
static struct drm_info_list msm_debugfs_list[] = {
{"gem", msm_gem_show},
{ "mm", msm_mm_show },
};
-static struct drm_info_list msm_kms_debugfs_list[] = {
- { "fb", msm_fb_show },
-};
-
static int late_init_minor(struct drm_minor *minor)
{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
int ret;
if (!minor)
return 0;
+ if (!priv->gpu_pdev)
+ return 0;
+
ret = msm_rd_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
@@ -375,20 +402,12 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->gpu_pdev)
msm_debugfs_gpu_init(minor);
- if (priv->kms) {
- drm_debugfs_create_files(msm_kms_debugfs_list,
- ARRAY_SIZE(msm_kms_debugfs_list),
- minor->debugfs_root, minor);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
- }
+ if (priv->kms)
+ msm_debugfs_kms_init(minor);
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
- if (priv->kms && priv->kms->funcs->debugfs_init)
- priv->kms->funcs->debugfs_init(priv->kms, minor);
-
fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
&fail_gem_alloc);
fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d007687c2446..9dcc7a596a11 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
-#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -41,17 +40,12 @@
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
* - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
+ * - 1.13.0 - Add VM_BIND
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 12
+#define MSM_VERSION_MINOR 13
#define MSM_VERSION_PATCHLEVEL 0
-static void msm_deinit_vram(struct drm_device *ddev);
-
-static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
-module_param(vram, charp, 0);
-
bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
@@ -60,10 +54,19 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
+static bool separate_gpu_kms;
+MODULE_PARM_DESC(separate_gpu_kms, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
+module_param(separate_gpu_kms, bool, 0400);
+
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
-static int msm_drm_uninit(struct device *dev)
+bool msm_gpu_no_components(void)
+{
+ return separate_gpu_kms;
+}
+
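Usage note for the new parameter (hedged: this assumes the driver is built as the usual "msm" module):

/*
 * Illustration only, not part of the patch: opting in at module load,
 * or equivalently on the kernel command line:
 *
 *     modprobe msm separate_gpu_kms=1
 *     msm.separate_gpu_kms=1
 */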
+static int msm_drm_uninit(struct device *dev, const struct component_ops *gpu_ops)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
@@ -79,16 +82,9 @@ static int msm_drm_uninit(struct device *dev)
if (ddev->registered) {
drm_dev_unregister(ddev);
if (priv->kms)
- drm_atomic_helper_shutdown(ddev);
+ msm_drm_kms_unregister(dev);
}
- /* We must cancel and cleanup any pending vblank enable/disable
- * work before msm_irq_uninstall() to avoid work re-enabling an
- * irq after uninstall has disabled it.
- */
-
- flush_workqueue(priv->wq);
-
msm_gem_shrinker_cleanup(ddev);
msm_perf_debugfs_cleanup(priv);
@@ -97,120 +93,19 @@ static int msm_drm_uninit(struct device *dev)
if (priv->kms)
msm_drm_kms_uninit(dev);
- msm_deinit_vram(ddev);
-
- component_unbind_all(dev, ddev);
+ if (gpu_ops)
+ gpu_ops->unbind(dev, dev, NULL);
+ else
+ component_unbind_all(dev, ddev);
ddev->dev_private = NULL;
drm_dev_put(ddev);
- destroy_workqueue(priv->wq);
-
return 0;
}
-bool msm_use_mmu(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
-
- /*
- * a2xx comes with its own MMU
- * On other platforms IOMMU can be declared specified either for the
- * MDP/DPU device or for its parent, MDSS device.
- */
- return priv->is_a2xx ||
- device_iommu_mapped(dev->dev) ||
- device_iommu_mapped(dev->dev->parent);
-}
-
-static int msm_init_vram(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct device_node *node;
- unsigned long size = 0;
- int ret = 0;
-
- /* In the device-tree world, we could have a 'memory-region'
- * phandle, which gives us a link to our "vram". Allocating
- * is all nicely abstracted behind the dma api, but we need
- * to know the entire size to allocate it all in one go. There
- * are two cases:
- * 1) device with no IOMMU, in which case we need exclusive
- * access to a VRAM carveout big enough for all gpu
- * buffers
- * 2) device with IOMMU, but where the bootloader puts up
- * a splash screen. In this case, the VRAM carveout
- * need only be large enough for fbdev fb. But we need
- * exclusive access to the buffer to avoid the kernel
- * using those pages for other purposes (which appears
- * as corruption on screen before we have a chance to
- * load and do initial modeset)
- */
-
- node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
- if (node) {
- struct resource r;
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
- size = r.end - r.start + 1;
- DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
-
- /* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire DMA chunk carved out in early startup in
- * mach-msm:
- */
- } else if (!msm_use_mmu(dev)) {
- DRM_INFO("using %s VRAM carveout\n", vram);
- size = memparse(vram, NULL);
- }
-
- if (size) {
- unsigned long attrs = 0;
- void *p;
-
- priv->vram.size = size;
-
- drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
- spin_lock_init(&priv->vram.lock);
-
- attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
- attrs |= DMA_ATTR_WRITE_COMBINE;
-
- /* note that for no-kernel-mapping, the vaddr returned
- * is bogus, but non-null if allocation succeeded:
- */
- p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, GFP_KERNEL, attrs);
- if (!p) {
- DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
- priv->vram.paddr = 0;
- return -ENOMEM;
- }
-
- DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
- (uint32_t)priv->vram.paddr,
- (uint32_t)(priv->vram.paddr + size));
- }
-
- return ret;
-}
-
-static void msm_deinit_vram(struct drm_device *ddev)
-{
- struct msm_drm_private *priv = ddev->dev_private;
- unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
-
- if (!priv->vram.paddr)
- return;
-
- drm_mm_takedown(&priv->vram.mm);
- dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
- attrs);
-}
-
-static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+static int msm_drm_init(struct device *dev, const struct drm_driver *drv,
+ const struct component_ops *gpu_ops)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
@@ -227,12 +122,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- priv->wq = alloc_ordered_workqueue("msm", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto err_put_dev;
- }
-
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -257,19 +146,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
}
- ret = msm_init_vram(ddev);
- if (ret)
- goto err_destroy_wq;
-
dma_set_max_seg_size(dev, UINT_MAX);
/* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ if (gpu_ops)
+ ret = gpu_ops->bind(dev, dev, NULL);
+ else
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_deinit_vram;
+ goto err_put_dev;
ret = msm_gem_shrinker_init(ddev);
if (ret)
@@ -279,11 +167,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ret = msm_drm_kms_init(dev, drv);
if (ret)
goto err_msm_uninit;
- } else {
- /* valid only for the dummy headless case, where of_node=NULL */
- WARN_ON(dev->of_node);
- ddev->driver_features &= ~DRIVER_MODESET;
- ddev->driver_features &= ~DRIVER_ATOMIC;
}
ret = drm_dev_register(ddev, 0);
@@ -294,22 +177,16 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- if (priv->kms_init) {
- drm_kms_helper_poll_init(ddev);
- drm_client_setup(ddev, NULL);
- }
+ if (priv->kms_init)
+ msm_drm_kms_post_init(dev);
return 0;
err_msm_uninit:
- msm_drm_uninit(dev);
+ msm_drm_uninit(dev, gpu_ops);
return ret;
-err_deinit_vram:
- msm_deinit_vram(ddev);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
@@ -333,11 +210,42 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
+/**
+ * msm_context_vm - lazily create the context's VM
+ *
+ * @dev: the drm device
+ * @ctx: the context
+ *
+ * The VM is lazily created, so that userspace has a chance to opt-in to having
+ * a userspace managed VM before the VM is created.
+ *
+ * Note that this does not return a reference to the VM. Once the VM is created,
+ * it exists for the lifetime of the context.
+ */
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
+{
+ static DEFINE_MUTEX(init_lock);
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* Once ctx->vm is created it is valid for the lifetime of the context: */
+ if (ctx->vm)
+ return ctx->vm;
+
+ mutex_lock(&init_lock);
+ if (!ctx->vm) {
+ ctx->vm = msm_gpu_create_private_vm(
+ priv->gpu, current, !ctx->userspace_managed_vm);
+	}
+ mutex_unlock(&init_lock);
+
+ return ctx->vm;
+}
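A hedged caller-side sketch (hypothetical function, not part of this patch): ioctl paths resolve the VM through msm_context_vm() rather than reading ctx->vm directly, so the first user triggers creation under the mutex above:

/* hypothetical caller, illustrating the lazy-create contract only */
static int example_use_vm(struct drm_device *dev, struct drm_file *file)
{
	struct msm_context *ctx = file->driver_priv;
	struct drm_gpuvm *vm = msm_context_vm(dev, ctx);

	/* no reference to put: the VM lives until the context is closed */
	return vm ? 0 : -ENOMEM;	/* assumption: NULL on create failure */
}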
+
static int context_init(struct drm_device *dev, struct drm_file *file)
{
static atomic_t ident = ATOMIC_INIT(0);
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -349,7 +257,6 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
- ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
ctx->seqno = atomic_inc_return(&ident);
@@ -367,23 +274,24 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
return context_init(dev, file);
}
-static void context_close(struct msm_file_private *ctx)
+static void context_close(struct msm_context *ctx)
{
+ ctx->closed = true;
msm_submitqueue_close(ctx);
- msm_file_private_put(ctx);
+ msm_context_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
/*
* It is not possible to set sysprof param to non-zero if gpu
* is not initialized:
*/
if (priv->gpu)
- msm_file_private_set_sysprof(ctx, priv->gpu, 0);
+ msm_context_set_sysprof(ctx, priv->gpu, 0);
context_close(ctx);
}
@@ -515,11 +423,14 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
uint64_t *iova)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
if (!priv->gpu)
return -EINVAL;
+ if (msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "VM_BIND is enabled");
+
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
@@ -527,7 +438,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
- return msm_gem_get_iova(obj, ctx->aspace, iova);
+ return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
@@ -535,19 +446,23 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
uint64_t iova)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gpuvm *vm = msm_context_vm(dev, ctx);
if (!priv->gpu)
return -EINVAL;
+ if (msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "VM_BIND is enabled");
+
/* Only supported if per-process address space is supported: */
- if (priv->gpu->aspace == ctx->aspace)
+ if (priv->gpu->vm == vm)
return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
- return msm_gem_set_iova(obj, ctx->aspace, iova);
+ return msm_gem_set_iova(obj, vm, iova);
}
static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
@@ -555,6 +470,7 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
u32 metadata_size)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *new_metadata;
void *buf;
int ret;
@@ -572,8 +488,14 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
if (ret)
goto out;
- msm_obj->metadata =
+ new_metadata =
krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+ if (!new_metadata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msm_obj->metadata = new_metadata;
msm_obj->metadata_size = metadata_size;
memcpy(msm_obj->metadata, buf, metadata_size);
@@ -872,6 +794,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW),
};
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
@@ -893,13 +816,45 @@ static const struct file_operations fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#define DRIVER_FEATURES_GPU ( \
+ DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
+ DRIVER_RENDER | \
+ DRIVER_SYNCOBJ | \
+ DRIVER_SYNCOBJ_TIMELINE | \
+ 0 )
+
+#define DRIVER_FEATURES_KMS ( \
+ DRIVER_GEM | \
+ DRIVER_ATOMIC | \
+ DRIVER_MODESET | \
+ 0 )
+
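For illustration (drm_core_check_feature() is standard DRM core API, not added by this patch; ddev is an assumed struct drm_device pointer): runtime paths gate on the advertised bits, which is why the GPU-only device must not carry the KMS bits:

if (drm_core_check_feature(ddev, DRIVER_MODESET)) {
	/* reached for msm_driver and msm_kms_driver, never msm_gpu_driver */
}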
static const struct drm_driver msm_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_RENDER |
- DRIVER_ATOMIC |
- DRIVER_MODESET |
- DRIVER_SYNCOBJ_TIMELINE |
- DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_FEATURES_GPU | DRIVER_FEATURES_KMS,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .gem_prime_import = msm_gem_prime_import,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ MSM_FBDEV_DRIVER_OPS,
+ .show_fdinfo = msm_show_fdinfo,
+ .ioctls = msm_ioctls,
+ .num_ioctls = ARRAY_SIZE(msm_ioctls),
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
+static const struct drm_driver msm_kms_driver = {
+ .driver_features = DRIVER_FEATURES_KMS,
.open = msm_open,
.postclose = msm_postclose,
.dumb_create = msm_gem_dumb_create,
@@ -910,6 +865,23 @@ static const struct drm_driver msm_driver = {
#endif
MSM_FBDEV_DRIVER_OPS,
.show_fdinfo = msm_show_fdinfo,
+ .fops = &fops,
+ .name = "msm-kms",
+ .desc = "MSM Snapdragon DRM",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
+static const struct drm_driver msm_gpu_driver = {
+ .driver_features = DRIVER_FEATURES_GPU,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ .show_fdinfo = msm_show_fdinfo,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),
.fops = &fops,
@@ -1044,12 +1016,16 @@ static int add_gpu_components(struct device *dev,
static int msm_drm_bind(struct device *dev)
{
- return msm_drm_init(dev, &msm_driver);
+ return msm_drm_init(dev,
+ msm_gpu_no_components() ?
+ &msm_kms_driver :
+ &msm_driver,
+ NULL);
}
static void msm_drm_unbind(struct device *dev)
{
- msm_drm_uninit(dev);
+ msm_drm_uninit(dev, NULL);
}
const struct component_master_ops msm_drm_ops = {
@@ -1080,9 +1056,11 @@ int msm_drv_probe(struct device *master_dev,
return ret;
}
- ret = add_gpu_components(master_dev, &match);
- if (ret)
- return ret;
+ if (!msm_gpu_no_components()) {
+ ret = add_gpu_components(master_dev, &match);
+ if (ret)
+ return ret;
+ }
/* on all devices that I am aware of, iommu's which can map
* any address the cpu can see are used:
@@ -1098,29 +1076,34 @@ int msm_drv_probe(struct device *master_dev,
return 0;
}
-/*
- * Platform driver:
- * Used only for headlesss GPU instances
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
+int msm_gpu_probe(struct platform_device *pdev,
+ const struct component_ops *ops)
{
- return msm_drv_probe(&pdev->dev, NULL, NULL);
+ struct msm_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
+ return msm_drm_init(&pdev->dev, &msm_gpu_driver, ops);
}
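A hedged sketch (not from this patch) of how a GPU platform driver might use the new entry points: probe standalone when component use is disabled, otherwise fall back to the component framework. example_gpu_ops and example_gpu_probe are hypothetical names:

/* hypothetical ops; a real driver fills in .bind/.unbind */
static const struct component_ops example_gpu_ops;

static int example_gpu_probe(struct platform_device *pdev)
{
	if (msm_gpu_no_components())
		return msm_gpu_probe(pdev, &example_gpu_ops);

	return component_add(&pdev->dev, &example_gpu_ops);
}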
-static void msm_pdev_remove(struct platform_device *pdev)
+void msm_gpu_remove(struct platform_device *pdev,
+ const struct component_ops *ops)
{
- component_master_del(&pdev->dev, &msm_drm_ops);
+ msm_drm_uninit(&pdev->dev, ops);
}
-static struct platform_driver msm_platform_driver = {
- .probe = msm_pdev_probe,
- .remove = msm_pdev_remove,
- .driver = {
- .name = "msm",
- },
-};
-
static int __init msm_drm_register(void)
{
if (!modeset)
@@ -1135,13 +1118,13 @@ static int __init msm_drm_register(void)
adreno_register();
msm_mdp4_register();
msm_mdss_register();
- return platform_driver_register(&msm_platform_driver);
+
+ return 0;
}
static void __exit msm_drm_unregister(void)
{
DBG("fini");
- platform_driver_unregister(&msm_platform_driver);
msm_mdss_unregister();
msm_mdp4_unregister();
msm_dp_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c8afb1ea6040..985db9febd98 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -48,8 +48,6 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_gem_address_space;
-struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
@@ -72,12 +70,6 @@ enum msm_dsi_controller {
#define MSM_GPU_MAX_RINGS 4
-/* Commit/Event thread specific structure */
-struct msm_drm_thread {
- struct drm_device *dev;
- struct kthread_worker *worker;
-};
-
struct msm_drm_private {
struct drm_device *dev;
@@ -88,16 +80,6 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* possibly this should be in the kms component, but it is
- * shared by both mdp4 and mdp5..
- */
- struct hdmi *hdmi;
-
- /* DSI is shared by mdp4 and mdp5 */
- struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
-
- struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
-
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -177,23 +159,6 @@ struct msm_drm_private {
struct mutex lock;
} lru;
- struct workqueue_struct *wq;
-
- unsigned int num_crtcs;
-
- struct msm_drm_thread event_thread[MAX_CRTCS];
-
- /* VRAM carveout, used when no IOMMU: */
- struct {
- unsigned long size;
- dma_addr_t paddr;
- /* NOTE: mm managed at the page level, size is in # of pages
- * and position mm_node->start is in # of pages:
- */
- struct drm_mm mm;
- spinlock_t lock; /* Protects drm_mm node allocation/removal */
- } vram;
-
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
@@ -264,11 +229,13 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
- struct drm_file *file);
+ struct drm_file *file);
+int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
+ struct drm_file *file);
#ifdef CONFIG_DEBUG_FS
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
@@ -280,25 +247,25 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, bool needs_dirtyfb);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, bool needed_dirtyfb);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
-#ifdef CONFIG_DRM_FBDEV_EMULATION
+#ifdef CONFIG_DRM_MSM_KMS_FBDEV
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define MSM_FBDEV_DRIVER_OPS \
@@ -383,6 +350,7 @@ static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
}
#endif
+struct msm_dp;
#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
@@ -576,6 +544,10 @@ extern const struct component_master_ops msm_drm_ops;
int msm_kms_pm_prepare(struct device *dev);
void msm_kms_pm_complete(struct device *dev);
+int msm_gpu_probe(struct platform_device *pdev,
+ const struct component_ops *ops);
+void msm_gpu_remove(struct platform_device *pdev,
+ const struct component_ops *ops);
int msm_drv_probe(struct device *dev,
int (*kms_init)(struct drm_device *dev),
struct msm_kms *kms);
@@ -583,4 +555,6 @@ void msm_kms_shutdown(struct platform_device *pdev);
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);
+bool msm_gpu_no_components(void);
+
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 09268e416843..1eff615ff9bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -30,6 +30,7 @@ struct msm_framebuffer {
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb,
@@ -75,20 +76,22 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
/* prepare/pin all the fb's bo's for scanout.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace,
- bool needs_dirtyfb)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb)
{
+ struct msm_drm_private *priv = fb->dev->dev_private;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
if (needs_dirtyfb)
refcount_inc(&msm_fb->dirtyfb);
- atomic_inc(&msm_fb->prepare_count);
+ if (atomic_inc_return(&msm_fb->prepare_count) > 1)
+ return 0;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
+ msm_gem_vma_get(fb->obj[i]);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], vm, &msm_fb->iova[i]);
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
@@ -98,25 +101,28 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace,
- bool needed_dirtyfb)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb)
{
+ struct msm_drm_private *priv = fb->dev->dev_private;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
if (needed_dirtyfb)
refcount_dec(&msm_fb->dirtyfb);
- for (i = 0; i < n; i++)
- msm_gem_unpin_iova(fb->obj[i], aspace);
+ if (atomic_dec_return(&msm_fb->prepare_count))
+ return;
+
+ memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
- if (!atomic_dec_return(&msm_fb->prepare_count))
- memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
+ for (i = 0; i < n; i++) {
+ msm_gem_unpin_iova(fb->obj[i], vm);
+ msm_gem_vma_put(fb->obj[i]);
+ }
}
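The prepare/cleanup pair above now follows a first/last-reference discipline: only the first prepare pins (and takes the VMA reference), and only the last cleanup unpins. A generic sketch of the pattern, with hypothetical helper names:

static atomic_t refs = ATOMIC_INIT(0);

static void example_get(void)
{
	if (atomic_inc_return(&refs) == 1)
		first_user_setup();	/* hypothetical: pin/map once */
}

static void example_put(void)
{
	if (atomic_dec_return(&refs) == 0)
		last_user_teardown();	/* hypothetical: unpin/unmap once */
}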
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
- struct msm_gem_address_space *aspace, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_fb->iova[plane] + fb->offsets[plane];
@@ -134,10 +140,9 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
int ret, i, n = info->num_planes;
@@ -150,7 +155,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
}
}
- fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ fb = msm_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unref;
@@ -165,10 +170,9 @@ out_unref:
}
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
@@ -222,7 +226,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->base.obj[i] = bos[i];
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
@@ -271,7 +275,10 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
msm_gem_object_set_name(bo, "stolenfb");
- fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = msm_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c62249b1ab3d..b5969374d53f 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,7 +122,7 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->vm, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2995e80fec3b..33d3354c6102 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,24 +17,9 @@
#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
-#include "msm_mmu.h"
-
-static dma_addr_t physaddr(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
- priv->vram.paddr;
-}
-
-static bool use_pages(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- return !msm_obj->vram_node;
-}
+#include "msm_kms.h"
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
@@ -44,7 +29,7 @@ static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
rcu_read_lock(); /* Locks file->pid! */
@@ -55,13 +40,73 @@ static void update_ctx_mem(struct drm_file *file, ssize_t size)
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
+ msm_gem_vma_get(obj);
update_ctx_mem(file, obj->size);
return 0;
}
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason);
+
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
+
update_ctx_mem(file, -obj->size);
+ msm_gem_vma_put(obj);
+
+ /*
+	 * If the VM isn't created yet, there is nothing to clean up. In fact,
+	 * calling put_iova_spaces() with vm=NULL would be bad: it would tear
+	 * down the mappings of shared buffers in other contexts.
+ */
+ if (!ctx->vm)
+ return;
+
+ /*
+ * VM_BIND does not depend on implicit teardown of VMAs on handle
+ * close, but instead on implicit teardown of the VM when the device
+ * is closed (see msm_gem_vm_close())
+ */
+ if (msm_context_is_vmbind(ctx))
+ return;
+
+ /*
+ * TODO we might need to kick this to a queue to avoid blocking
+ * in CLOSE ioctl
+ */
+ dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
+ put_iova_spaces(obj, ctx->vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
+}
+
+/*
+ * Get/put for kms->vm VMA
+ */
+
+void msm_gem_vma_get(struct drm_gem_object *obj)
+{
+ atomic_inc(&to_msm_bo(obj)->vma_ref);
+}
+
+void msm_gem_vma_put(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct drm_exec exec;
+
+ if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
+ return;
+
+ if (!priv->kms)
+ return;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
+ put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
+ drm_exec_fini(&exec); /* drop locks */
}
/*
@@ -135,36 +180,6 @@ static void update_lru(struct drm_gem_object *obj)
mutex_unlock(&priv->lru.lock);
}
-/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- dma_addr_t paddr;
- struct page **p;
- int ret, i;
-
- p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- spin_lock(&priv->vram.lock);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
- spin_unlock(&priv->vram.lock);
- if (ret) {
- kvfree(p);
- return ERR_PTR(ret);
- }
-
- paddr = physaddr(obj);
- for (i = 0; i < npages; i++) {
- p[i] = pfn_to_page(__phys_to_pfn(paddr));
- paddr += PAGE_SIZE;
- }
-
- return p;
-}
-
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -176,10 +191,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
- if (use_pages(obj))
- p = drm_gem_get_pages(obj);
- else
- p = get_pages_vram(obj, npages);
+ p = drm_gem_get_pages(obj);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
@@ -212,22 +224,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return msm_obj->pages;
}
-static void put_pages_vram(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
-
- spin_lock(&priv->vram.lock);
- drm_mm_remove_node(msm_obj->vram_node);
- spin_unlock(&priv->vram.lock);
-
- kvfree(msm_obj->pages);
-}
-
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /*
+ * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
+	 * See explanation in msm_gem_assert_locked()
+ */
+ if (kref_read(&obj->refcount))
+ drm_gpuvm_bo_gem_evict(obj, true);
+
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
@@ -244,18 +251,14 @@ static void put_pages(struct drm_gem_object *obj)
update_device_mem(obj->dev->dev_private, -obj->size);
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
- put_pages_vram(obj);
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
msm_obj->pages = NULL;
update_lru(obj);
}
}
-static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
- unsigned madv)
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -397,48 +400,31 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
-static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo;
msm_gem_assert_locked(obj);
- vma = msm_gem_vma_new(aspace);
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- list_add_tail(&vma->list, &msm_obj->vmas);
-
- return vma;
-}
-
-static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
- msm_gem_assert_locked(obj);
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm == vm) {
+ /* lookup_vma() should only be used in paths
+ * with at most one vma per vm
+ */
+ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace == aspace)
- return vma;
+ return vma;
+ }
+ }
}
return NULL;
}
-static void del_vma(struct msm_gem_vma *vma)
-{
- if (!vma)
- return;
-
- list_del(&vma->list);
- kfree(vma);
-}
-
/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
@@ -446,71 +432,54 @@ static void del_vma(struct msm_gem_vma *vma)
* mapping.
*/
static void
-put_iova_spaces(struct drm_gem_object *obj, bool close)
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo, *tmp;
msm_gem_assert_locked(obj);
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace) {
- msm_gem_vma_purge(vma);
+ drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
+ struct drm_gpuva *vma, *vmatmp;
+
+ if (vm && vm_bo->vm != vm)
+ continue;
+
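+		/* hold a ref: closing the vm_bo's last vma below could otherwise free it */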
+ drm_gpuvm_bo_get(vm_bo);
+
+ drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
+ msm_gem_vma_unmap(vma, reason);
if (close)
msm_gem_vma_close(vma);
}
- }
-}
-/* Called with msm_obj locked */
-static void
-put_iova_vmas(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
-
- msm_gem_assert_locked(obj);
-
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- del_vma(vma);
+ drm_gpuvm_bo_put(vm_bo);
}
}
-static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace,
- u64 range_start, u64 range_end)
+static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, u64 range_start,
+ u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
msm_gem_assert_locked(obj);
- vma = lookup_vma(obj, aspace);
+ vma = lookup_vma(obj, vm);
if (!vma) {
- int ret;
-
- vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return vma;
-
- ret = msm_gem_vma_init(vma, obj->size,
- range_start, range_end);
- if (ret) {
- del_vma(vma);
- return ERR_PTR(ret);
- }
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
- GEM_WARN_ON(vma->iova < range_start);
- GEM_WARN_ON((vma->iova + obj->size) > range_end);
+ GEM_WARN_ON(vma->va.addr < range_start);
+ GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
return vma;
}
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_prot(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **pages;
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
@@ -522,13 +491,22 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
+ return prot;
+}
+
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ int prot = msm_gem_prot(obj);
+
msm_gem_assert_locked(obj);
pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -560,28 +538,31 @@ void msm_gem_unpin_active(struct drm_gem_object *obj)
update_lru_active(obj);
}
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- return get_vma_locked(obj, aspace, 0, U64_MAX);
+ return get_vma_locked(obj, vm, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
int ret;
msm_gem_assert_locked(obj);
- vma = get_vma_locked(obj, aspace, range_start, range_end);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
+ vma = get_vma_locked(obj, vm, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
- *iova = vma->iova;
+ *iova = vma->va.addr;
pin_obj_locked(obj);
}
@@ -593,58 +574,59 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
* limits iova to specified range (in pages)
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
+ struct drm_exec exec;
int ret;
- msm_gem_lock(obj);
- ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
- msm_gem_unlock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
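The msm_gem_lock_vm_and_obj()/drm_exec_fini() pairing used here and below locks both the VM's shared reservation and the object's own reservation. A hedged sketch of what such a helper looks like (its real definition is not in these hunks; this illustration is built from core drm_exec/drm_gpuvm API):

static inline void
example_lock_vm_and_obj(struct drm_exec *exec, struct drm_gem_object *obj,
			struct drm_gpuvm *vm)
{
	drm_exec_init(exec, 0, 2);
	drm_exec_until_all_locked (exec) {
		int ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));

		/* NO_SHARE objects alias the VM resv; don't lock it twice */
		if (!ret && obj->resv != drm_gpuvm_resv(vm))
			ret = drm_exec_lock_obj(exec, obj);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;
	}
}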
/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+ return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
- vma = get_vma_locked(obj, aspace, 0, U64_MAX);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
- *iova = vma->iova;
+ *iova = vma->va.addr;
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
static int clear_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+ struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma = lookup_vma(obj, aspace);
+ struct drm_gpuva *vma = lookup_vma(obj, vm);
if (!vma)
return 0;
- msm_gem_vma_purge(vma);
+ msm_gem_vma_unmap(vma, NULL);
msm_gem_vma_close(vma);
- del_vma(vma);
return 0;
}
@@ -657,44 +639,54 @@ static int clear_iova(struct drm_gem_object *obj,
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova)
+ struct drm_gpuvm *vm, uint64_t iova)
{
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
- ret = clear_iova(obj, aspace);
+ ret = clear_iova(obj, vm);
} else {
- struct msm_gem_vma *vma;
- vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+ struct drm_gpuva *vma;
+ vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- } else if (GEM_WARN_ON(vma->iova != iova)) {
- clear_iova(obj, aspace);
+ } else if (GEM_WARN_ON(vma->va.addr != iova)) {
+ clear_iova(obj, vm);
ret = -EBUSY;
}
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
+static bool is_kms_vm(struct drm_gpuvm *vm)
+{
+ struct msm_drm_private *priv = vm->drm->dev_private;
+
+ return priv->kms && (priv->kms->vm == vm);
+}
+
/*
* Unpin an iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
- msm_gem_lock(obj);
- vma = lookup_vma(obj, aspace);
- if (!GEM_WARN_ON(!vma)) {
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = lookup_vma(obj, vm);
+ if (vma) {
msm_gem_unpin_locked(obj);
}
- msm_gem_unlock(obj);
+ if (!is_kms_vm(vm))
+ put_iova_spaces(obj, vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -853,7 +845,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, true);
+ put_iova_spaces(obj, NULL, false, "purge");
msm_gem_vunmap(obj);
@@ -861,8 +853,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_pages(obj);
- put_iova_vmas(obj);
-
mutex_lock(&priv->lru.lock);
/* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
@@ -893,7 +883,7 @@ void msm_gem_evict(struct drm_gem_object *obj)
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, false);
+ put_iova_spaces(obj, NULL, false, "evict");
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
@@ -920,7 +910,7 @@ bool msm_gem_active(struct drm_gem_object *obj)
if (to_msm_bo(obj)->pin_count)
return true;
- return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
+ return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -959,11 +949,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- msm_gem_lock(obj);
+ if (!msm_gem_trylock(obj))
+ return;
stats->all.count++;
stats->all.size += obj->size;
@@ -1002,31 +992,33 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
+ if (!list_empty(&obj->gpuva.list)) {
+ struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- const char *name, *comm;
- if (vma->aspace) {
- struct msm_gem_address_space *aspace = vma->aspace;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ const char *name, *comm;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct task_struct *task =
- get_pid_task(aspace->pid, PIDTYPE_PID);
+ get_pid_task(vm->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}
- name = aspace->name;
- } else {
- name = comm = NULL;
+ name = vm->base.name;
+
+ seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->vm, vma->va.addr,
+ to_msm_vma(vma)->mapped ? "" : "un");
+ kfree(comm);
}
- seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
- name, comm ? ":" : "", comm ? comm : "",
- vma->aspace, vma->iova,
- vma->mapped ? "mapped" : "unmapped");
- kfree(comm);
}
seq_puts(m, "\n");
@@ -1067,12 +1059,46 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- put_iova_spaces(obj, true);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+	 * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case.
+ *
+ * Note that we skip the dance if we aren't attached to any VM. This
+ * is load bearing. The driver needs to support two usage models:
+ *
+ * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
+ * implicitly torn down when the object is freed, the VMA's do
+ * not hold a hard reference to the BO.
+ *
+ * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
+	 *    BO. This can be dropped when the VM is closed and its associated
+ * VMAs are torn down. (See msm_gem_vm_close()).
+ *
+ * In the latter case the last reference to a BO can be dropped while
+ * we already have the VM locked. It would have already been removed
+	 * from the gpuva list, but lockdep doesn't know that, nor does it
+	 * understand the differences between the two usage models.
+ */
+ if (!list_empty(&obj->gpuva.list)) {
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec,
+ drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
+ put_iova_spaces(obj, NULL, true, "free");
+ drm_exec_fini(&exec); /* drop locks */
+ }
if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
@@ -1082,13 +1108,18 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
*/
kvfree(msm_obj->pages);
- put_iova_vmas(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
- put_iova_vmas(obj);
+ }
+
+ if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ struct drm_gem_object *r_obj =
+ container_of(obj->resv, struct drm_gem_object, _resv);
+
+ /* Drop reference we hold to shared resv obj: */
+ drm_gem_object_put(r_obj);
}
drm_gem_object_release(obj);
@@ -1123,6 +1154,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (name)
msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) {
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
+
+ drm_gem_object_get(r_obj);
+
+ obj->resv = r_obj->resv;
+ }
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -1155,6 +1195,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
+ .export = msm_gem_prime_export,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
@@ -1194,7 +1235,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
- INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
@@ -1207,19 +1247,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
- bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
- if (!msm_use_mmu(dev))
- use_vram = true;
- else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
- use_vram = true;
-
- if (GEM_WARN_ON(use_vram && !priv->vram.size))
- return ERR_PTR(-EINVAL);
-
/* Disallow zero sized objects as they make the underlying
* infrastructure grumpy
*/
@@ -1232,44 +1263,16 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
msm_obj = to_msm_bo(obj);
- if (use_vram) {
- struct msm_gem_vma *vma;
- struct page **pages;
-
- drm_gem_private_object_init(dev, obj, size);
-
- msm_gem_lock(obj);
-
- vma = add_vma(obj, NULL);
- msm_gem_unlock(obj);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto fail;
- }
-
- to_msm_bo(obj)->vram_node = &vma->node;
-
- msm_gem_lock(obj);
- pages = get_pages(obj);
- msm_gem_unlock(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
-
- vma->iova = physaddr(obj);
- } else {
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
- /*
- * Our buffers are kept pinned, so allocating them from the
- * MOVABLE zone is a really bad idea, and conflicts with CMA.
- * See comments above new_inode() why this is required _and_
- * expected if you're going to pin these pages.
- */
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
- }
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+	 * See comments above new_inode() for why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
@@ -1297,12 +1300,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size;
int ret, npages;
- /* if we don't have IOMMU, don't bother pretending we can import: */
- if (!msm_use_mmu(dev)) {
- DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
- return ERR_PTR(-EINVAL);
- }
-
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
@@ -1348,9 +1345,9 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
@@ -1360,14 +1357,14 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ ret = msm_gem_get_and_pin_iova(obj, vm, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_unpin_iova(obj, aspace);
+ msm_gem_unpin_iova(obj, vm);
ret = PTR_ERR(vaddr);
goto err;
}
@@ -1383,14 +1380,13 @@ err:
}
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace)
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
- msm_gem_unpin_iova(bo, aspace);
+ msm_gem_unpin_iova(bo, vm);
drm_gem_object_put(bo);
}
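Hypothetical usage of the two helpers above (error handling abbreviated; SZ_4K and MSM_BO_WC are existing kernel/uapi constants):

static int example_alloc(struct drm_device *dev, struct drm_gpuvm *vm)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, vm, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* CPU access via vaddr, GPU access via iova ... */

	msm_gem_kernel_put(bo, vm);	/* unmaps vaddr, unpins iova, drops bo */
	return 0;
}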
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index ba5c4ff76292..88239da1cd72 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -7,9 +7,11 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include "msm_mmu.h"
#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/drm_exec.h"
+#include "drm/drm_gpuvm.h"
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
@@ -22,56 +24,173 @@
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
-struct msm_gem_address_space {
- const char *name;
- /* NOTE: mm managed at the page level, size is in # of pages
- * and position mm_node->start is in # of pages:
+/**
+ * struct msm_gem_vm_log_entry - An entry in the VM log
+ *
+ * For userspace managed VMs, a log of recent VM updates is tracked and
+ * captured in GPU devcore dumps, to aid debugging issues caused by (for
+ * example) incorrectly synchronized VM updates
+ */
+struct msm_gem_vm_log_entry {
+ const char *op;
+ uint64_t iova;
+ uint64_t range;
+ int queue_id;
+};
+
+/**
+ * struct msm_gem_vm - VM object
+ *
+ * A VM object representing a GPU (or display or GMU or ...) virtual address
+ * space.
+ *
+ * In the case of GPU, if per-process address spaces are supported, the address
+ * space is split into two VMs, which map to TTBR0 and TTBR1 in the SMMU. TTBR0
+ * is used for userspace objects, and is unique per msm_context/drm_file, while
+ * TTBR1 is the same for all processes. (The kernel controlled ringbuffer and
+ * a few other kernel controlled buffers live in TTBR1.)
+ *
+ * The GPU TTBR0 vm can be managed by userspace or by the kernel, depending on
+ * whether userspace supports VM_BIND. All other VMs are managed by the kernel.
+ * (Managed by kernel means the kernel is responsible for VA allocation.)
+ *
+ * Note that because VM_BIND allows a given BO to be mapped multiple times in
+ * a VM, and therefore to have multiple VMAs in a VM, there is an extra object
+ * provided by drm_gpuvm infrastructure.. the drm_gpuvm_bo, which is not
+ * embedded in any larger driver structure. The GEM object holds a list of
+ * drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma
+ * holds a reference to the vm_bo, and drops it when the vma is unlinked.
+ * So we just need to call drm_gpuvm_bo_obtain() to return a ref to an
+ * existing vm_bo, or create a new one. Once the vma is linked, the ref
+ * to the vm_bo can be dropped (since the vma is holding one).
+ */
+struct msm_gem_vm {
+ /** @base: Inherit from drm_gpuvm. */
+ struct drm_gpuvm base;
+
+ /**
+ * @sched: Scheduler used for asynchronous VM_BIND request.
+ *
+ * Unused for kernel managed VMs (where all operations are synchronous).
+ */
+ struct drm_gpu_scheduler sched;
+
+ /**
+ * @prealloc_throttle: Used to throttle VM_BIND ops if too much pre-
+ * allocated memory is in flight.
+ *
+ * Because we have to pre-allocate pgtable pages for the worst case
+ * (ie. new mappings do not share any PTEs with existing mappings)
+ * we could end up consuming a lot of resources transiently. The
+ * prealloc_throttle puts an upper bound on that.
+ */
+ struct {
+ /** @wait: Notified when preallocated resources are released */
+ wait_queue_head_t wait;
+
+ /**
+ * @in_flight: The # of preallocated pgtable pages in-flight
+ * for queued VM_BIND jobs.
+ */
+ atomic_t in_flight;
+ } prealloc_throttle;
+
+ /**
+ * @mm: Memory management for kernel managed VA allocations
+ *
+ * Only used for kernel managed VMs, unused for user managed VMs.
+ *
+ * Protected by @mm_lock.
*/
struct drm_mm mm;
- spinlock_t lock; /* Protects drm_mm node allocation/removal */
+
+	/** @mm_lock: Protects @mm node allocation/removal */
+	spinlock_t mm_lock;
+ /** @mmu: The mmu object which manages the pgtables */
struct msm_mmu *mmu;
- struct kref kref;
- /* For address spaces associated with a specific process, this
+ /** @mmu_lock: Protects access to the mmu */
+ struct mutex mmu_lock;
+
+ /**
+ * @pid: For address spaces associated with a specific process, this
* will be non-NULL:
*/
struct pid *pid;
- /* @faults: the number of GPU hangs associated with this address space */
+ /** @last_fence: Fence for last pending work scheduled on the VM */
+ struct dma_fence *last_fence;
+
+ /** @log: A log of recent VM updates */
+ struct msm_gem_vm_log_entry *log;
+
+ /** @log_shift: length of @log is (1 << @log_shift) */
+ uint32_t log_shift;
+
+ /** @log_idx: index of next @log entry to write */
+ uint32_t log_idx;
+
+ /** @faults: the number of GPU hangs associated with this address space */
int faults;
- /** @va_start: lowest possible address to allocate */
- uint64_t va_start;
+ /** @managed: is this a kernel managed VM? */
+ bool managed;
- /** @va_size: the size of the address space (in bytes) */
- uint64_t va_size;
+ /**
+ * @unusable: True if the VM has turned unusable because something
+ * bad happened during an asynchronous request.
+ *
+ * We don't try to recover from such failures, because this implies
+ * informing userspace about the specific operation that failed, and
+ * hoping the userspace driver can replay things from there. This all
+ * sounds very complicated for little gain.
+ *
+ * Instead, we should just flag the VM as unusable, and fail any
+ * further request targeting this VM.
+ *
+ * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
+ * situation, where the logical device needs to be re-created.
+ */
+ bool unusable;
};
+#define to_msm_vm(x) container_of(x, struct msm_gem_vm, base)
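+
+/*
+ * A minimal sketch of the vm_bo reference pattern described above (the
+ * function name is hypothetical; compare msm_gem_vma_new() in
+ * msm_gem_vma.c below).  The caller is assumed to hold the object lock
+ * and the VM resv:
+ */
+static inline int
+example_link_vma(struct drm_gpuvm *vm, struct drm_gem_object *obj,
+		 struct drm_gpuva *vma)
+{
+	/* Returns a ref to an existing vm_bo, or creates a new one: */
+	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_obtain(vm, obj);
+
+	if (IS_ERR(vm_bo))
+		return PTR_ERR(vm_bo);
+
+	/* The linked vma now holds its own reference to the vm_bo: */
+	drm_gpuva_link(vma, vm_bo);
+
+	/* ... so the ref returned by _obtain() can be dropped: */
+	drm_gpuvm_bo_put(vm_bo);
+
+	return 0;
+}
+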
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace);
-
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+struct drm_gpuvm *
+msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 va_size, bool managed);
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size);
+void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
+void msm_gem_vm_unusable(struct drm_gpuvm *gpuvm);
struct msm_fence_context;
+#define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0)
+
+/**
+ * struct msm_gem_vma - a VMA mapping
+ *
+ * Represents a mapping of a GEM object within a VM.
+ */
struct msm_gem_vma {
+ /** @base: inherit from drm_gpuva */
+ struct drm_gpuva base;
+
+ /**
+ * @node: mm node for VA allocation
+ *
+ * Only used by kernel managed VMs
+ */
struct drm_mm_node node;
- uint64_t iova;
- struct msm_gem_address_space *aspace;
- struct list_head list; /* node in msm_gem_object::vmas */
+
+ /** @mapped: Is this VMA mapped? */
bool mapped;
};
+#define to_msm_vma(x) container_of(x, struct msm_gem_vma, base)
-struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace);
-int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
- u64 range_start, u64 range_end);
-void msm_gem_vma_purge(struct msm_gem_vma *vma);
-int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
-void msm_gem_vma_close(struct msm_gem_vma *vma);
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end);
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
+void msm_gem_vma_close(struct drm_gpuva *vma);
struct msm_gem_object {
struct drm_gem_object base;
@@ -100,13 +219,6 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct list_head vmas; /* list of msm_gem_vma */
-
- /* For physically contiguous buffers. Used when we don't have
- * an IOMMU. Also used for stolen/splashscreen buffer.
- */
- struct drm_mm_node *vram_node;
-
char name[32]; /* Identifier to print for the debugfs files */
/* userspace metadata backchannel */
@@ -119,27 +231,56 @@ struct msm_gem_object {
* Protected by LRU lock.
*/
int pin_count;
+
+ /**
+ * @vma_ref: Reference count of VMA users.
+ *
+ * With the vm_bo/vma holding a reference to the GEM object, we'd
+ * otherwise have to actively tear down a VMA when, for example,
+ * a buffer is unpinned for scanout, vs. the pre-drm_gpuvm approach
+ * where a VMA did not hold a reference to the BO, but instead was
+ * implicitly torn down when the BO was freed.
+ *
+ * To regain the lazy VMA teardown, we use the @vma_ref. It is
+ * incremented for any of the following:
+ *
+ * 1) the BO is exported as a dma_buf
+ * 2) the BO has an open userspace handle
+ *
+ * All of those conditions will hold a reference to the BO,
+ * preventing it from being freed. So lazily keeping around the
+ * VMA will not prevent the BO from being freed. (Or rather, the
+ * reference loop is harmless in this case.)
+ *
+ * When the @vma_ref drops to zero, the kms->vm VMA will be
+ * torn down.
+ */
+ atomic_t vma_ref;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+void msm_gem_vma_get(struct drm_gem_object *obj);
+void msm_gem_vma_put(struct drm_gem_object *obj);
+
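+/*
+ * A minimal sketch of the @vma_ref pairing (these call sites are
+ * hypothetical; the real ones are handle open/close and dma-buf
+ * export/release, see msm_gem_prime.c below):
+ */
+static inline void
+example_handle_open(struct drm_gem_object *obj)
+{
+	msm_gem_vma_get(obj);	/* handle keeps VMAs alive lazily */
+}
+
+static inline void
+example_handle_close(struct drm_gem_object *obj)
+{
+	msm_gem_vma_put(obj);	/* last put tears down the kms->vm VMA */
+}
+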
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+int msm_gem_prot(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
void msm_gem_unpin_active(struct drm_gem_object *obj);
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
-int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova);
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm);
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv);
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -159,11 +300,10 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova);
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
@@ -188,6 +328,12 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
+static inline bool __must_check
+msm_gem_trylock(struct drm_gem_object *obj)
+{
+ return dma_resv_trylock(obj->resv);
+}
+
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@@ -200,6 +346,37 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
+/**
+ * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM
+ * @exec: the exec context helper which will be initialized
+ * @obj: the GEM object to lock
+ * @vm: the VM to lock
+ *
+ * Operations which modify a VM frequently need to lock both the VM and
+ * the object being mapped/unmapped/etc. This helper uses drm_exec to
+ * acquire both locks, dealing with potential deadlock/backoff scenarios
+ * which arise when multiple locks are involved.
+ */
+static inline int
+msm_gem_lock_vm_and_obj(struct drm_exec *exec,
+ struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
+{
+ int ret = 0;
+
+ drm_exec_init(exec, 0, 2);
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
+ if (!ret && (obj->resv != drm_gpuvm_resv(vm)))
+ ret = drm_exec_lock_obj(exec, obj);
+ drm_exec_retry_on_contention(exec);
+ if (GEM_WARN_ON(ret))
+ break;
+ }
+
+ return ret;
+}
+
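+/*
+ * A usage sketch for the helper above (the function name is
+ * hypothetical).  On success both resv locks are held; drm_exec_fini()
+ * must be called in either case, since the helper initializes @exec
+ * unconditionally:
+ */
+static inline int
+example_update_mapping(struct drm_gem_object *obj, struct drm_gpuvm *vm)
+{
+	struct drm_exec exec;
+	int ret;
+
+	ret = msm_gem_lock_vm_and_obj(&exec, obj, vm);
+	if (!ret) {
+		/* ... VM and obj are locked, update the mapping ... */
+	}
+	drm_exec_fini(&exec);
+
+	return ret;
+}
+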
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
@@ -257,7 +434,7 @@ struct msm_gem_submit {
struct kref ref;
struct drm_device *dev;
struct msm_gpu *gpu;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
struct list_head node; /* node in ring submit list */
struct drm_exec exec;
uint32_t seqno; /* Sequence number of the submit on the ring */
@@ -297,6 +474,7 @@ struct msm_gem_submit {
struct drm_gem_object *obj;
uint32_t handle;
};
+ struct drm_gpuvm_bo *vm_bo;
uint64_t iova;
} bos[];
};
@@ -320,14 +498,4 @@ static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
void msm_submit_retire(struct msm_gem_submit *submit);
-/* helper to determine of a buffer in submit should be dumped, used for both
- * devcoredump and debugfs cmdstream dumping:
- */
-static inline bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
- extern bool rd_full;
- return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 2e37913d5a6a..c0a33ac839cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
@@ -16,6 +17,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
+ if (msm_obj->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EINVAL);
+
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return ERR_PTR(-ENOMEM);
@@ -39,12 +43,70 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
msm_gem_put_vaddr_locked(obj);
}
+static void msm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ msm_gem_vma_put(obj);
+ drm_gem_dmabuf_release(dma_buf);
+}
+
+static const struct dma_buf_ops msm_gem_prime_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = msm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf)
+{
+ if (buf->ops == &msm_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, buf);
+}
+
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
return msm_gem_import(dev, attach->dmabuf, sg);
}
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EPERM);
+
+ msm_gem_vma_get(obj);
+
+ struct drm_device *dev = obj->dev;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &msm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ .resv = obj->resv,
+ };
+
+ return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
struct page **pages;
@@ -53,6 +115,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
if (drm_gem_is_imported(obj))
return 0;
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
ret = PTR_ERR(pages);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 07ca4ddfe4e3..1039e3c0a47b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -44,7 +44,76 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
}
static bool
-purge(struct drm_gem_object *obj)
+with_vm_locks(struct ww_acquire_ctx *ticket,
+ void (*fn)(struct drm_gem_object *obj),
+ struct drm_gem_object *obj)
+{
+ /*
+ * Track the last locked entry, for unwinding locks in error and
+ * success paths
+ */
+ struct drm_gpuvm_bo *vm_bo, *last_locked = NULL;
+ int ret = 0;
+
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ ret = dma_resv_lock(resv, ticket);
+
+ /*
+ * Since we already skip the case when the VM and obj
+ * share a resv (ie. _NO_SHARE objs), we don't expect
+ * to hit a double-locking scenario... which the lock
+ * unwinding cannot really cope with.
+ */
+ WARN_ON(ret == -EALREADY);
+
+ /*
+ * Don't bother with slow-lock / backoff / retry sequence,
+ * if we can't get the lock just give up and move on to
+ * the next object.
+ */
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Hold a ref to prevent the vm_bo from being freed
+ * and removed from the obj's gpuva list, as that would
+ * result in missing the unlock below
+ */
+ drm_gpuvm_bo_get(vm_bo);
+
+ last_locked = vm_bo;
+ }
+
+ fn(obj);
+
+out_unlock:
+ if (last_locked) {
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);
+
+ if (resv == obj->resv)
+ continue;
+
+ dma_resv_unlock(resv);
+
+ /* Drop the ref taken while locking: */
+ drm_gpuvm_bo_put(vm_bo);
+
+ if (last_locked == vm_bo)
+ break;
+ }
+ }
+
+ return ret == 0;
+}
+
+static bool
+purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!is_purgeable(to_msm_bo(obj)))
return false;
@@ -52,13 +121,11 @@ purge(struct drm_gem_object *obj)
if (msm_gem_active(obj))
return false;
- msm_gem_purge(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_purge, obj);
}
static bool
-evict(struct drm_gem_object *obj)
+evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (is_unevictable(to_msm_bo(obj)))
return false;
@@ -66,43 +133,42 @@ evict(struct drm_gem_object *obj)
if (msm_gem_active(obj))
return false;
- msm_gem_evict(obj);
-
- return true;
+ return with_vm_locks(ticket, msm_gem_evict, obj);
}
static bool
wait_for_idle(struct drm_gem_object *obj)
{
- enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}
static bool
-active_purge(struct drm_gem_object *obj)
+active_purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!wait_for_idle(obj))
return false;
- return purge(obj);
+ return purge(obj, ticket);
}
static bool
-active_evict(struct drm_gem_object *obj)
+active_evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!wait_for_idle(obj))
return false;
- return evict(obj);
+ return evict(obj, ticket);
}
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv = shrinker->private_data;
+ struct ww_acquire_ctx ticket;
struct {
struct drm_gem_lru *lru;
- bool (*shrink)(struct drm_gem_object *obj);
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket);
bool cond;
unsigned long freed;
unsigned long remaining;
@@ -122,8 +188,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
continue;
stages[i].freed =
drm_gem_lru_scan(stages[i].lru, nr,
- &stages[i].remaining,
- stages[i].shrink);
+ &stages[i].remaining,
+ stages[i].shrink,
+ &ticket);
nr -= stages[i].freed;
freed += stages[i].freed;
remaining += stages[i].remaining;
@@ -164,7 +231,7 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct drm_gem_object *obj)
+vmap_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
if (!is_vunmapable(to_msm_bo(obj)))
return false;
@@ -192,7 +259,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
&remaining,
- vmap_shrink);
+ vmap_shrink,
+ NULL);
}
*(unsigned long *)ptr += unmapped;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index d4f71bb54e84..5f8e939a5906 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+#include "msm_syncobj.h"
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
@@ -30,7 +32,7 @@
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
- uint32_t nr_cmds)
+ uint32_t nr_cmds, u64 drm_client_id)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
@@ -54,7 +56,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
+ drm_client_id);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
@@ -63,7 +66,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
kref_init(&submit->ref);
submit->dev = dev;
- submit->aspace = queue->ctx->aspace;
+ submit->vm = msm_context_vm(dev, queue->ctx);
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
@@ -191,6 +194,7 @@ out:
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
unsigned i;
size_t sz;
int ret = 0;
@@ -222,6 +226,20 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
goto out;
}
+ if (msm_context_is_vmbind(ctx)) {
+ if (submit_cmd.nr_relocs) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
+ goto out;
+ }
+
+ if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
+ goto out;
+ }
+
+ submit->cmd[i].iova = submit_cmd.iova;
+ }
+
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].offset = submit_cmd.submit_offset / 4;
@@ -256,24 +274,48 @@ out:
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ struct drm_exec *exec = &submit->exec;
int ret;
- drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ flags |= DRM_EXEC_IGNORE_DUPLICATES;
+
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
+
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_exec_lock_obj(&submit->exec,
+ drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&submit->exec);
+ if (ret)
+ return ret;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- goto error;
+ return ret;
}
}
return 0;
-
-error:
- return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
@@ -308,10 +350,10 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
/* if locking succeeded, pin bo: */
- vma = msm_gem_get_vma_locked(obj, submit->aspace);
+ vma = msm_gem_get_vma_locked(obj, submit->vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
break;
@@ -321,7 +363,8 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- submit->bos[i].iova = vma->iova;
+ submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
+ submit->bos[i].iova = vma->va.addr;
}
/*
@@ -358,9 +401,18 @@ static void submit_unpin_objects(struct msm_gem_submit *submit)
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
- int i;
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+ struct dma_fence *last_fence;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
+ submit->user_fence,
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
+ return;
+ }
- for (i = 0; i < submit->nr_bos; i++) {
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -370,6 +422,10 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
+
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
@@ -458,14 +514,14 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ if (submit->exec.objects)
+ drm_exec_fini(&submit->exec);
+
if (error) {
submit_unpin_objects(submit);
/* job wasn't enqueued to scheduler, so early retirement: */
msm_submit_retire(submit);
}
-
- if (submit->exec.objects)
- drm_exec_fini(&submit->exec);
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -474,191 +530,29 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
+ struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo;
+ msm_gem_lock(obj);
+ drm_gpuvm_bo_put(vm_bo);
+ msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
-struct msm_submit_post_dep {
- struct drm_syncobj *syncobj;
- uint64_t point;
- struct dma_fence_chain *chain;
-};
-
-static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
- struct drm_file *file,
- uint64_t in_syncobjs_addr,
- uint32_t nr_in_syncobjs,
- size_t syncobj_stride)
-{
- struct drm_syncobj **syncobjs = NULL;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!syncobjs)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_in_syncobjs; ++i) {
- uint64_t address = in_syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- if (syncobj_desc.point &&
- !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
- break;
- }
-
- if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
- break;
- }
-
- ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
- syncobj_desc.handle, syncobj_desc.point);
- if (ret)
- break;
-
- if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
- syncobjs[i] =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!syncobjs[i]) {
- ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
- break;
- }
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- if (syncobjs[j])
- drm_syncobj_put(syncobjs[j]);
- }
- kfree(syncobjs);
- return ERR_PTR(ret);
- }
- return syncobjs;
-}
-
-static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
- uint32_t nr_syncobjs)
-{
- uint32_t i;
-
- for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
- if (syncobjs[i])
- drm_syncobj_replace_fence(syncobjs[i], NULL);
- }
-}
-
-static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
- struct drm_file *file,
- uint64_t syncobjs_addr,
- uint32_t nr_syncobjs,
- size_t syncobj_stride)
-{
- struct msm_submit_post_dep *post_deps;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!post_deps)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_syncobjs; ++i) {
- uint64_t address = syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- post_deps[i].point = syncobj_desc.point;
-
- if (syncobj_desc.flags) {
- ret = UERR(EINVAL, dev, "invalid syncobj flags");
- break;
- }
-
- if (syncobj_desc.point) {
- if (!drm_core_check_feature(dev,
- DRIVER_SYNCOBJ_TIMELINE)) {
- ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
- break;
- }
-
- post_deps[i].chain = dma_fence_chain_alloc();
- if (!post_deps[i].chain) {
- ret = -ENOMEM;
- break;
- }
- }
-
- post_deps[i].syncobj =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!post_deps[i].syncobj) {
- ret = UERR(EINVAL, dev, "invalid syncobj handle");
- break;
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- dma_fence_chain_free(post_deps[j].chain);
- if (post_deps[j].syncobj)
- drm_syncobj_put(post_deps[j].syncobj);
- }
-
- kfree(post_deps);
- return ERR_PTR(ret);
- }
-
- return post_deps;
-}
-
-static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
- uint32_t count, struct dma_fence *fence)
-{
- uint32_t i;
-
- for (i = 0; post_deps && i < count; ++i) {
- if (post_deps[i].chain) {
- drm_syncobj_add_point(post_deps[i].syncobj,
- post_deps[i].chain,
- fence, post_deps[i].point);
- post_deps[i].chain = NULL;
- } else {
- drm_syncobj_replace_fence(post_deps[i].syncobj,
- fence);
- }
- }
-}
-
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
- struct msm_file_private *ctx = file->driver_priv;
+ struct msm_context *ctx = file->driver_priv;
struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
- struct msm_submit_post_dep *post_deps = NULL;
+ struct msm_syncobj_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
struct sync_file *sync_file = NULL;
+ unsigned cmds_to_parse;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -669,10 +563,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
- DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
- return -EPERM;
- }
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
@@ -693,6 +585,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
+ if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
ring = gpu->rb[queue->ring_nr];
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
@@ -703,7 +600,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
+ file->client_id);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
goto out_post_unlock;
@@ -735,10 +633,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
- syncobjs_to_reset = msm_parse_deps(submit, file,
- args->in_syncobjs,
- args->nr_in_syncobjs,
- args->syncobj_stride);
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(syncobjs_to_reset)) {
ret = PTR_ERR(syncobjs_to_reset);
goto out_unlock;
@@ -746,10 +644,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
- post_deps = msm_parse_post_deps(dev, file,
- args->out_syncobjs,
- args->nr_out_syncobjs,
- args->syncobj_stride);
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps);
goto out_unlock;
@@ -779,7 +677,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- for (i = 0; i < args->nr_cmds; i++) {
+ cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;
+
+ for (i = 0; i < cmds_to_parse; i++) {
struct drm_gem_object *obj;
uint64_t iova;
@@ -810,7 +710,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- submit->nr_cmds = i;
+ submit->nr_cmds = args->nr_cmds;
idr_preload(GFP_KERNEL);
@@ -882,6 +782,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit_attach_object_fences(submit);
+ if (msm_context_is_vmbind(ctx)) {
+ /*
+ * If we are not using VM_BIND, submit_pin_objects() will validate
+ * just the BOs attached to the submit. In that case we don't
+ * need to validate the _entire_ vm, because userspace tracked
+ * which BOs are associated with the submit.
+ */
+ ret = drm_gpuvm_validate(submit->vm, &submit->exec);
+ if (ret)
+ goto out;
+ }
+
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
@@ -892,10 +804,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence = submit->fence_id;
queue->last_fence = submit->fence_id;
- msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
- msm_process_post_deps(post_deps, args->nr_out_syncobjs,
- submit->user_fence);
-
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);
out:
submit_cleanup(submit, !!ret);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 11e842dda73c..3cd8562a5109 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -4,73 +4,319 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include "drm/drm_file.h"
+#include "drm/msm_drm.h"
+#include "linux/file.h"
+#include "linux/sync_file.h"
+
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
+#include "msm_gpu.h"
#include "msm_mmu.h"
+#include "msm_syncobj.h"
+
+#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+static uint vm_log_shift = 0;
+MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
+module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
+
+/**
+ * struct msm_vm_map_op - create new pgtable mapping
+ */
+struct msm_vm_map_op {
+ /** @iova: start address for mapping */
+ uint64_t iova;
+ /** @range: size of the region to map */
+ uint64_t range;
+ /** @offset: offset into @sgt to map */
+ uint64_t offset;
+ /** @sgt: pages to map, or NULL for a PRR mapping */
+ struct sg_table *sgt;
+ /** @prot: the mapping protection flags */
+ int prot;
+
+ /**
+ * @queue_id: The id of the submitqueue the operation is performed
+ * on, or zero for (in particular) UNMAP ops triggered outside of
+ * a submitqueue (ie. process cleanup)
+ */
+ int queue_id;
+};
+
+/**
+ * struct msm_vm_unmap_op - unmap a range of pages from pgtable
+ */
+struct msm_vm_unmap_op {
+ /** @iova: start address for unmap */
+ uint64_t iova;
+ /** @range: size of region to unmap */
+ uint64_t range;
+
+ /** @reason: The reason for the unmap */
+ const char *reason;
+
+ /**
+ * @queue_id: The id of the submitqueue the operation is performed
+ * on, or zero for (in particular) UNMAP ops triggered outside of
+ * a submitqueue (ie. process cleanup)
+ */
+ int queue_id;
+};
+
+/**
+ * struct msm_vm_op - A MAP or UNMAP operation
+ */
+struct msm_vm_op {
+ /** @op: The operation type */
+ enum {
+ MSM_VM_OP_MAP = 1,
+ MSM_VM_OP_UNMAP,
+ } op;
+ union {
+ /** @map: Parameters used if op == MSM_VM_OP_MAP */
+ struct msm_vm_map_op map;
+ /** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
+ struct msm_vm_unmap_op unmap;
+ };
+ /** @node: list head in msm_vm_bind_job::vm_ops */
+ struct list_head node;
+
+ /**
+ * @obj: backing object for pages to be mapped/unmapped
+ *
+ * Async unmap ops, in particular, must hold a reference to the
+ * original GEM object backing the mapping that will be unmapped.
+ * But the same can be required in the map path, for example if
+ * there is not a corresponding unmap op, such as process exit.
+ *
+ * This ensures that the pages backing the mapping are not freed
+ * before the mapping is torn down.
+ */
+ struct drm_gem_object *obj;
+};
+
+/**
+ * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
+ *
+ * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
+ * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
+ * which are applied to the pgtables asynchronously. For example a userspace
+ * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
+ * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
+ */
+struct msm_vm_bind_job {
+ /** @base: base class for drm_sched jobs */
+ struct drm_sched_job base;
+ /** @vm: The VM being operated on */
+ struct drm_gpuvm *vm;
+ /** @fence: The fence that is signaled when job completes */
+ struct dma_fence *fence;
+ /** @queue: The queue that the job runs on */
+ struct msm_gpu_submitqueue *queue;
+ /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
+ struct msm_mmu_prealloc prealloc;
+ /** @vm_ops: a list of struct msm_vm_op */
+ struct list_head vm_ops;
+ /** @bos_pinned: are the GEM objects being bound pinned? */
+ bool bos_pinned;
+ /** @nr_ops: the number of userspace requested ops */
+ unsigned int nr_ops;
+ /**
+ * @ops: the userspace requested ops
+ *
+ * The userspace requested ops are copied/parsed and validated
+ * before we start applying the updates to try to do as much up-
+ * front error checking as possible, to avoid the VM being in an
+ * undefined state due to partially executed VM_BIND.
+ *
+ * This table also serves to hold a reference to the backing GEM
+ * objects.
+ */
+ struct msm_vm_bind_op {
+ uint32_t op;
+ uint32_t flags;
+ union {
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ };
+ uint64_t obj_offset;
+ uint64_t iova;
+ uint64_t range;
+ } ops[];
+};
+
+#define job_foreach_bo(obj, _job) \
+ for (unsigned i = 0; i < (_job)->nr_ops; i++) \
+ if ((obj = (_job)->ops[i].obj))
+
+static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
+{
+ return container_of(job, struct msm_vm_bind_job, base);
+}
static void
-msm_gem_address_space_destroy(struct kref *kref)
+msm_gem_vm_free(struct drm_gpuvm *gpuvm)
{
- struct msm_gem_address_space *aspace = container_of(kref,
- struct msm_gem_address_space, kref);
+ struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
- drm_mm_takedown(&aspace->mm);
- if (aspace->mmu)
- aspace->mmu->funcs->destroy(aspace->mmu);
- put_pid(aspace->pid);
- kfree(aspace);
+ drm_mm_takedown(&vm->mm);
+ if (vm->mmu)
+ vm->mmu->funcs->destroy(vm->mmu);
+ dma_fence_put(vm->last_fence);
+ put_pid(vm->pid);
+ kfree(vm->log);
+ kfree(vm);
}
+/**
+ * msm_gem_vm_unusable() - Mark a VM as unusable
+ * @gpuvm: the VM to mark unusable
+ */
+void
+msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
+{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ uint32_t nr_vm_logs;
+ int first;
+
+ vm->unusable = true;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ nr_vm_logs = MAX(0, first - 1);
+ first = 0;
+ } else {
+ nr_vm_logs = vm_log_len;
+ }
+
+ pr_err("vm-log:\n");
+ for (int i = 0; i < nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+ struct msm_gem_vm_log_entry *e = &vm->log[idx];
+ pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
+ e->op, e->queue_id, e->iova,
+ e->iova + e->range);
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+static void
+vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
{
- if (aspace)
- kref_put(&aspace->kref, msm_gem_address_space_destroy);
+ int idx;
+
+ if (!vm->managed)
+ lockdep_assert_held(&vm->mmu_lock);
+
+ vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
+
+ if (!vm->log)
+ return;
+
+ idx = vm->log_idx;
+ vm->log[idx].op = op;
+ vm->log[idx].iova = iova;
+ vm->log[idx].range = range;
+ vm->log[idx].queue_id = queue_id;
+ vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
+}
+
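+/*
+ * Worked example for the ring-buffer indexing used by vm_log(), with a
+ * hypothetical log_shift of 2: the log holds 1 << 2 == 4 entries, the
+ * mask is 0x3, so log_idx advances 0, 1, 2, 3, 0, ... and the fifth
+ * write overwrites the oldest entry:
+ */
+static inline uint32_t
+example_next_log_idx(const struct msm_gem_vm *vm)
+{
+	return (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
+}
+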
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+ const char *reason = op->reason;
+
+ if (!reason)
+ reason = "unmap";
+
+ vm_log(vm, reason, op->iova, op->range, op->queue_id);
+
+ vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
}
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+static int
+vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
{
- if (!IS_ERR_OR_NULL(aspace))
- kref_get(&aspace->kref);
+ vm_log(vm, "map", op->iova, op->range, op->queue_id);
- return aspace;
+ return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
+ op->range, op->prot);
}
/* Actually unmap memory for the vma */
-void msm_gem_vma_purge(struct msm_gem_vma *vma)
+void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
{
- struct msm_gem_address_space *aspace = vma->aspace;
- unsigned size = vma->node.size;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
/* Don't do anything if the memory isn't mapped */
- if (!vma->mapped)
+ if (!msm_vma->mapped)
return;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+ /*
+ * The mmu_lock is only needed when preallocation is used. But
+ * in that case we don't need to worry about recursion into the
+ * shrinker.
+ */
+ if (!vm->managed)
+ mutex_lock(&vm->mmu_lock);
- vma->mapped = false;
+ vm_unmap_op(vm, &(struct msm_vm_unmap_op){
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .reason = reason,
+ });
+
+ if (!vm->managed)
+ mutex_unlock(&vm->mmu_lock);
+
+ msm_vma->mapped = false;
}
/* Map and pin vma: */
int
-msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
- struct sg_table *sgt, int size)
+msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
int ret;
- if (GEM_WARN_ON(!vma->iova))
+ if (GEM_WARN_ON(!vma->va.addr))
return -EINVAL;
- if (vma->mapped)
+ if (msm_vma->mapped)
return 0;
- vma->mapped = true;
+ msm_vma->mapped = true;
- if (!aspace)
- return 0;
+ /*
+ * The mmu_lock is only needed when preallocation is used. But
+ * in that case we don't need to worry about recursion into the
+ * shrinker.
+ */
+ if (!vm->managed)
+ mutex_lock(&vm->mmu_lock);
/*
* NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
@@ -81,97 +327,1205 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);
+ ret = vm_map_op(vm, &(struct msm_vm_map_op){
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .offset = vma->gem.offset,
+ .sgt = sgt,
+ .prot = prot,
+ });
- if (ret) {
- vma->mapped = false;
- }
+ if (!vm->managed)
+ mutex_unlock(&vm->mmu_lock);
+
+ if (ret)
+ msm_vma->mapped = false;
return ret;
}
/* Close an iova. Warn if it is still in use */
-void msm_gem_vma_close(struct msm_gem_vma *vma)
+void msm_gem_vma_close(struct drm_gpuva *vma)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
- GEM_WARN_ON(vma->mapped);
+ GEM_WARN_ON(msm_vma->mapped);
- spin_lock(&aspace->lock);
- if (vma->iova)
- drm_mm_remove_node(&vma->node);
- spin_unlock(&aspace->lock);
+ drm_gpuvm_resv_assert_held(&vm->base);
+
+ if (vma->gem.obj)
+ msm_gem_assert_locked(vma->gem.obj);
- vma->iova = 0;
+ if (vma->va.addr && vm->managed)
+ drm_mm_remove_node(&msm_vma->node);
- msm_gem_address_space_put(aspace);
+ drm_gpuva_remove(vma);
+ drm_gpuva_unlink(vma);
+
+ kfree(vma);
}
-struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
+/* Create a new vma and allocate an iova for it */
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end)
{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ struct drm_gpuvm_bo *vm_bo;
struct msm_gem_vma *vma;
+ int ret;
+
+ drm_gpuvm_resv_assert_held(&vm->base);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ if (vm->managed) {
+ BUG_ON(offset != 0);
+ BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */
+ ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
+ obj->size, PAGE_SIZE, 0,
+ range_start, range_end, 0);
+
+ if (ret)
+ goto err_free_vma;
+
+ range_start = vma->node.start;
+ range_end = range_start + obj->size;
+ }
+
+ if (obj)
+ GEM_WARN_ON((range_end - range_start) > obj->size);
+
+ drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ vma->mapped = false;
+
+ ret = drm_gpuva_insert(&vm->base, &vma->base);
+ if (ret)
+ goto err_free_range;
- vma->aspace = aspace;
+ if (!obj)
+ return &vma->base;
- return vma;
+ vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
+ if (IS_ERR(vm_bo)) {
+ ret = PTR_ERR(vm_bo);
+ goto err_va_remove;
+ }
+
+ drm_gpuvm_bo_extobj_add(vm_bo);
+ drm_gpuva_link(&vma->base, vm_bo);
+ GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
+
+ return &vma->base;
+
+err_va_remove:
+ drm_gpuva_remove(&vma->base);
+err_free_range:
+ if (vm->managed)
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ kfree(vma);
+ return ERR_PTR(ret);
}
-/* Initialize a new vma and allocate an iova for it */
-int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
- u64 range_start, u64 range_end)
+static int
+msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
- struct msm_gem_address_space *aspace = vma->aspace;
+ struct drm_gem_object *obj = vm_bo->obj;
+ struct drm_gpuva *vma;
int ret;
- if (GEM_WARN_ON(!aspace))
- return -EINVAL;
+ vm_dbg("validate: %p", obj);
+
+ msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(vma->iova))
- return -EBUSY;
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ ret = msm_gem_pin_vma_locked(obj, vma);
+ if (ret)
+ return ret;
+ }
- spin_lock(&aspace->lock);
- ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
- size, PAGE_SIZE, 0,
- range_start, range_end, 0);
- spin_unlock(&aspace->lock);
+ return 0;
+}
- if (ret)
- return ret;
+struct op_arg {
+ unsigned flags;
+ struct msm_vm_bind_job *job;
+};
- vma->iova = vma->node.start;
- vma->mapped = false;
+static void
+vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
+{
+ struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
+ *op = _op;
+ list_add_tail(&op->node, &arg->job->vm_ops);
+
+ if (op->obj)
+ drm_gem_object_get(op->obj);
+}
+
+static struct drm_gpuva *
+vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
+{
+ return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
+ op->va.addr, op->va.addr + op->va.range);
+}
+
+static int
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gem_object *obj = op->map.gem.obj;
+ struct drm_gpuva *vma;
+ struct sg_table *sgt;
+ unsigned prot;
+
+ vma = vma_from_op(arg, &op->map);
+ if (WARN_ON(IS_ERR(vma)))
+ return PTR_ERR(vma);
+
+ vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
+ vma->va.addr, vma->va.range);
+
+ vma->flags = ((struct op_arg *)arg)->flags;
+
+ if (obj) {
+ sgt = to_msm_bo(obj)->sgt;
+ prot = msm_gem_prot(obj);
+ } else {
+ sgt = NULL;
+ prot = IOMMU_READ | IOMMU_WRITE;
+ }
- kref_get(&aspace->kref);
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_MAP,
+ .map = {
+ .sgt = sgt,
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .offset = vma->gem.offset,
+ .prot = prot,
+ .queue_id = job->queue->id,
+ },
+ .obj = vma->gem.obj,
+ });
+
+ to_msm_vma(vma)->mapped = true;
+
+ return 0;
+}
+
+static int
+msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gpuvm *vm = job->vm;
+ struct drm_gpuva *orig_vma = op->remap.unmap->va;
+ struct drm_gpuva *prev_vma = NULL, *next_vma = NULL;
+ struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
+ bool mapped = to_msm_vma(orig_vma)->mapped;
+ unsigned flags;
+
+ vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
+ orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
+
+ if (mapped) {
+ uint64_t unmap_start, unmap_range;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
+
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_UNMAP,
+ .unmap = {
+ .iova = unmap_start,
+ .range = unmap_range,
+ .queue_id = job->queue->id,
+ },
+ .obj = orig_vma->gem.obj,
+ });
+
+ /*
+ * Part of this GEM obj is still mapped, but we're going to kill the
+ * existing VMA and replace it with one or two new ones (ie. two if
+ * the unmapped range is in the middle of the existing (unmap) VMA).
+ * So just set the state to unmapped:
+ */
+ to_msm_vma(orig_vma)->mapped = false;
+ }
+
+ /*
+ * Hold a ref to the vm_bo between the msm_gem_vma_close() and the
+ * creation of the new prev/next vma's, in case the vm_bo is tracked
+ * in the VM's evict list:
+ */
+ if (vm_bo)
+ drm_gpuvm_bo_get(vm_bo);
+
+ /*
+ * The prev_vma and/or next_vma are replacing the unmapped vma, and
+ * therefore should preserve its flags:
+ */
+ flags = orig_vma->flags;
+
+ msm_gem_vma_close(orig_vma);
+
+ if (op->remap.prev) {
+ prev_vma = vma_from_op(arg, op->remap.prev);
+ if (WARN_ON(IS_ERR(prev_vma)))
+ return PTR_ERR(prev_vma);
+
+ vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
+ to_msm_vma(prev_vma)->mapped = mapped;
+ prev_vma->flags = flags;
+ }
+
+ if (op->remap.next) {
+ next_vma = vma_from_op(arg, op->remap.next);
+ if (WARN_ON(IS_ERR(next_vma)))
+ return PTR_ERR(next_vma);
+
+ vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
+ to_msm_vma(next_vma)->mapped = mapped;
+ next_vma->flags = flags;
+ }
+
+ if (!mapped)
+ drm_gpuvm_bo_evict(vm_bo, true);
+
+ /* Drop the previous ref: */
+ drm_gpuvm_bo_put(vm_bo);
return 0;
}
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size)
+static int
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+{
+ struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct drm_gpuva *vma = op->unmap.va;
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+
+ vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
+ vma->va.addr, vma->va.range);
+
+ if (!msm_vma->mapped)
+ goto out_close;
+
+ vm_op_enqueue(arg, (struct msm_vm_op){
+ .op = MSM_VM_OP_UNMAP,
+ .unmap = {
+ .iova = vma->va.addr,
+ .range = vma->va.range,
+ .queue_id = job->queue->id,
+ },
+ .obj = vma->gem.obj,
+ });
+
+ msm_vma->mapped = false;
+
+out_close:
+ msm_gem_vma_close(vma);
+
+ return 0;
+}
+
+static const struct drm_gpuvm_ops msm_gpuvm_ops = {
+ .vm_free = msm_gem_vm_free,
+ .vm_bo_validate = msm_gem_vm_bo_validate,
+ .sm_step_map = msm_gem_vm_sm_step_map,
+ .sm_step_remap = msm_gem_vm_sm_step_remap,
+ .sm_step_unmap = msm_gem_vm_sm_step_unmap,
+};
+
+static struct dma_fence *
+msm_vma_job_run(struct drm_sched_job *_job)
+{
+ struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct drm_gem_object *obj;
+ int ret = vm->unusable ? -EINVAL : 0;
+
+ vm_dbg("");
+
+ mutex_lock(&vm->mmu_lock);
+ vm->mmu->prealloc = &job->prealloc;
+
+ while (!list_empty(&job->vm_ops)) {
+ struct msm_vm_op *op =
+ list_first_entry(&job->vm_ops, struct msm_vm_op, node);
+
+ switch (op->op) {
+ case MSM_VM_OP_MAP:
+ /*
+ * On error, stop trying to map new things.. but we
+ * still want to process the unmaps (or in particular,
+ * the drm_gem_object_put()s)
+ */
+ if (!ret)
+ ret = vm_map_op(vm, &op->map);
+ break;
+ case MSM_VM_OP_UNMAP:
+ vm_unmap_op(vm, &op->unmap);
+ break;
+ }
+ drm_gem_object_put(op->obj);
+ list_del(&op->node);
+ kfree(op);
+ }
+
+ vm->mmu->prealloc = NULL;
+ mutex_unlock(&vm->mmu_lock);
+
+ /*
+ * We failed to perform at least _some_ of the pgtable updates, so
+ * now the VM is in an undefined state. Game over!
+ */
+ if (ret)
+ msm_gem_vm_unusable(job->vm);
+
+ job_foreach_bo (obj, job) {
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
+ msm_gem_unlock(obj);
+ }
+
+ /* VM_BIND ops are synchronous, so no fence to wait on: */
+ return NULL;
+}
+
+static void
+msm_vma_job_free(struct drm_sched_job *_job)
{
- struct msm_gem_address_space *aspace;
+ struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct drm_gem_object *obj;
+
+ vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
+
+ atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ drm_sched_job_cleanup(_job);
+
+ job_foreach_bo (obj, job)
+ drm_gem_object_put(obj);
+
+ msm_submitqueue_put(job->queue);
+ dma_fence_put(job->fence);
+
+ /* In error paths, we could have unexecuted ops: */
+ while (!list_empty(&job->vm_ops)) {
+ struct msm_vm_op *op =
+ list_first_entry(&job->vm_ops, struct msm_vm_op, node);
+ list_del(&op->node);
+ kfree(op);
+ }
+
+ wake_up(&vm->prealloc_throttle.wait);
+
+ kfree(job);
+}
+
+static const struct drm_sched_backend_ops msm_vm_bind_ops = {
+ .run_job = msm_vma_job_run,
+ .free_job = msm_vma_job_free
+};
+
+/**
+ * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
+ * @drm: the drm device
+ * @mmu: the backing MMU objects handling mapping/unmapping
+ * @name: the name of the VM
+ * @va_start: the start offset of the VA space
+ * @va_size: the size of the VA space
+ * @managed: is it a kernel managed VM?
+ *
+ * In a kernel managed VM, the kernel handles address allocation, and only
+ * synchronous operations are supported. In a user managed VM, userspace
+ * handles virtual address allocation, and both async and sync operations
+ * are supported.
+ */
+struct drm_gpuvm *
+msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 va_size, bool managed)
+{
+ /*
+ * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
+ * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose
+ * tracking that an extobj is evicted) :facepalm:
+ */
+ enum drm_gpuvm_flags flags = 0;
+ struct msm_gem_vm *vm;
+ struct drm_gem_object *dummy_gem;
+ int ret = 0;
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
- if (!aspace)
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&aspace->lock);
- aspace->name = name;
- aspace->mmu = mmu;
- aspace->va_start = va_start;
- aspace->va_size = size;
+ dummy_gem = drm_gpuvm_resv_object_alloc(drm);
+ if (!dummy_gem) {
+ ret = -ENOMEM;
+ goto err_free_vm;
+ }
+
+ if (!managed) {
+ struct drm_sched_init_args args = {
+ .ops = &msm_vm_bind_ops,
+ .num_rqs = 1,
+ .credit_limit = 1,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = "msm-vm-bind",
+ .dev = drm->dev,
+ };
+
+ ret = drm_sched_init(&vm->sched, &args);
+ if (ret)
+ goto err_free_dummy;
+
+ init_waitqueue_head(&vm->prealloc_throttle.wait);
+ }
+
+ drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
+ va_start, va_size, 0, 0, &msm_gpuvm_ops);
+ drm_gem_object_put(dummy_gem);
+
+ vm->mmu = mmu;
+ mutex_init(&vm->mmu_lock);
+ vm->managed = managed;
+
+ drm_mm_init(&vm->mm, va_start, va_size);
+
+ /*
+ * We don't really need vm log for kernel managed VMs, as the kernel
+ * is responsible for ensuring that GEM objs are mapped if they are
+ * used by a submit. Furthermore we piggyback on mmu_lock to serialize
+ * access to the log.
+ *
+ * Limit the max log_shift to 8 to prevent userspace from asking us
+ * for an unreasonable log size.
+ */
+ if (!managed)
+ vm->log_shift = MIN(vm_log_shift, 8);
+
+ if (vm->log_shift) {
+ vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
+ GFP_KERNEL | __GFP_ZERO);
+ }
+
+ return &vm->base;
+
+err_free_dummy:
+ drm_gem_object_put(dummy_gem);
+
+err_free_vm:
+ kfree(vm);
+ return ERR_PTR(ret);
+}
+
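+/*
+ * An example caller (the VA range values are hypothetical): create a
+ * kernel managed VM for the GPU, keeping the first 16MB of VA
+ * unmapped:
+ */
+static inline struct drm_gpuvm *
+example_create_gpu_vm(struct drm_device *drm, struct msm_mmu *mmu)
+{
+	return msm_gem_vm_create(drm, mmu, "gpu", SZ_16M,
+				 0x100000000ULL - SZ_16M, true);
+}
+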
+/**
+ * msm_gem_vm_close() - Close a VM
+ * @gpuvm: The VM to close
+ *
+ * Called when the drm device file is closed, to tear down VM related resources
+ * (which will drop refcounts to GEM objects that were still mapped into the
+ * VM at the time).
+ */
+void
+msm_gem_vm_close(struct drm_gpuvm *gpuvm)
+{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+ struct drm_gpuva *vma, *tmp;
+ struct drm_exec exec;
+
+ /*
+ * For kernel managed VMs, the VMAs are torn down when the handle is
+ * closed, so nothing more to do.
+ */
+ if (vm->managed)
+ return;
- drm_mm_init(&aspace->mm, va_start, size);
+ if (vm->last_fence)
+ dma_fence_wait(vm->last_fence, false);
+
+ /* Kill the scheduler now, so we aren't racing with it for cleanup: */
+ drm_sched_stop(&vm->sched, NULL);
+ drm_sched_fini(&vm->sched);
+
+ /* Tear down any remaining mappings: */
+ drm_exec_init(&exec, 0, 2);
+ drm_exec_until_all_locked (&exec) {
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
+ struct drm_gem_object *obj = vma->gem.obj;
+
+ /*
+ * MSM_BO_NO_SHARE objects share the same resv as the
+ * VM, in which case the obj is already locked:
+ */
+ if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
+ obj = NULL;
+
+ if (obj) {
+ drm_exec_lock_obj(&exec, obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+
+ msm_gem_vma_unmap(vma, "close");
+ msm_gem_vma_close(vma);
+
+ if (obj) {
+ drm_exec_unlock_obj(&exec, obj);
+ }
+ }
+ }
+ drm_exec_fini(&exec);
+}
+
+
+static struct msm_vm_bind_job *
+vm_bind_job_create(struct drm_device *dev, struct drm_file *file,
+ struct msm_gpu_submitqueue *queue, uint32_t nr_ops)
+{
+ struct msm_vm_bind_job *job;
+ uint64_t sz;
+ int ret;
- kref_init(&aspace->kref);
+ sz = struct_size(job, ops, nr_ops);
- return aspace;
+ if (sz > SIZE_MAX)
+ return ERR_PTR(-ENOMEM);
+
+ job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_sched_job_init(&job->base, queue->entity, 1, queue,
+ file->client_id);
+ if (ret) {
+ kfree(job);
+ return ERR_PTR(ret);
+ }
+
+ job->vm = msm_context_vm(dev, queue->ctx);
+ job->queue = queue;
+ INIT_LIST_HEAD(&job->vm_ops);
+
+ return job;
+}
+
+static bool invalid_alignment(uint64_t addr)
+{
+ /*
+ * Technically this is about GPU alignment, not CPU alignment. But
+ * I've not seen any qcom SoC where the SMMU does not support the
+ * CPU's smallest page size.
+ */
+ return !PAGE_ALIGNED(addr);
+}
+
+static int
+lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
+{
+ struct drm_device *dev = job->vm->drm;
+ int i = job->nr_ops++;
+ int ret = 0;
+
+ job->ops[i].op = op->op;
+ job->ops[i].handle = op->handle;
+ job->ops[i].obj_offset = op->obj_offset;
+ job->ops[i].iova = op->iova;
+ job->ops[i].range = op->range;
+ job->ops[i].flags = op->flags;
+
+ if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
+ ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
+
+ if (invalid_alignment(op->iova))
+ ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
+
+ if (invalid_alignment(op->obj_offset))
+ ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
+
+ if (invalid_alignment(op->range))
+ ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
+
+ if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
+ ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
+
+ /*
+ * MAP must specify a valid handle. But the handle MBZ for
+ * UNMAP or MAP_NULL.
+ */
+ if (op->op == MSM_VM_BIND_OP_MAP) {
+ if (!op->handle)
+ ret = UERR(EINVAL, dev, "invalid handle\n");
+ } else if (op->handle) {
+ ret = UERR(EINVAL, dev, "handle must be zero\n");
+ }
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_MAP:
+ case MSM_VM_BIND_OP_MAP_NULL:
+ case MSM_VM_BIND_OP_UNMAP:
+ break;
+ default:
+ ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * ioctl parsing, parameter validation, and GEM handle lookup
+ */
+static int
+vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args,
+ struct drm_file *file, int *nr_bos)
+{
+ struct drm_device *dev = job->vm->drm;
+ int ret = 0;
+ int cnt = 0;
+
+ if (args->nr_ops == 1) {
+ /* Single op case, the op is inlined: */
+ ret = lookup_op(job, &args->op);
+ } else {
+ for (unsigned i = 0; i < args->nr_ops; i++) {
+ struct drm_msm_vm_bind_op op;
+ void __user *userptr =
+ u64_to_user_ptr(args->ops + (i * sizeof(op)));
+
+ /*
+ * Make sure we don't have garbage flags, in case we hit an
+ * error path before flags is initialized:
+ */
+ job->ops[i].flags = 0;
+
+ if (copy_from_user(&op, userptr, sizeof(op))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = lookup_op(job, &op);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret) {
+ job->nr_ops = 0;
+ goto out;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (unsigned i = 0; i < args->nr_ops; i++) {
+ struct drm_gem_object *obj;
+
+ if (!job->ops[i].handle) {
+ job->ops[i].obj = NULL;
+ continue;
+ }
+
+ /*
+ * Normally we'd use drm_gem_object_lookup(), but for a bulk lookup
+ * all under a single table_lock we just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, job->ops[i].handle);
+ if (!obj) {
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ goto out_unlock;
+ }
+
+ drm_gem_object_get(obj);
+
+ job->ops[i].obj = obj;
+ cnt++;
+ }
+
+ *nr_bos = cnt;
+
+out_unlock:
+ spin_unlock(&file->table_lock);
+
+out:
+ return ret;
+}
+
+static void
+prealloc_count(struct msm_vm_bind_job *job,
+ struct msm_vm_bind_op *first,
+ struct msm_vm_bind_op *last)
+{
+ struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu;
+
+ if (!first)
+ return;
+
+ uint64_t start_iova = first->iova;
+ uint64_t end_iova = last->iova + last->range;
+
+ mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);
+}
+
+static bool
+ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
+{
+ /*
+ * A last-level page table covers 2MB, so two ops that land in the
+ * same 2MB range should be merged from the PoV of figuring out how
+ * many pgtable pages to pre-allocate:
+ */
+ uint64_t pte_mask = ~(SZ_2M - 1);
+ return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
+}
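A quick worked check of the merge predicate, with invented values (a hedged sketch, not part of the patch):

/*
 * Op 'a' ends exactly where op 'b' begins, and both masked values come
 * out as 0x200000, so prealloc counts them as one contiguous mapping.
 */
struct msm_vm_bind_op a = { .iova = 0x100000, .range = 0x100000 }; /* ends at 0x200000 */
struct msm_vm_bind_op b = { .iova = 0x200000, .range = 0x1000 };

WARN_ON(!ops_are_same_pte(&a, &b));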
+
+/*
+ * Determine the amount of memory to prealloc for pgtables. For sparse images,
+ * in particular, userspace plays some tricks with the order of page mappings
+ * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
+ * So detect when multiple MAP operations are physically contiguous, and count
+ * them as a single mapping. Otherwise the prealloc_count() will not realize
+ * they can share pagetable pages and vastly overcount.
+ */
+static int
+vm_bind_prealloc_count(struct msm_vm_bind_job *job)
+{
+ struct msm_vm_bind_op *first = NULL, *last = NULL;
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ int ret;
+
+ for (int i = 0; i < job->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ /* We only care about MAP/MAP_NULL: */
+ if (op->op == MSM_VM_BIND_OP_UNMAP)
+ continue;
+
+ /*
+ * If op is contiguous with last in the current range, then
+ * it becomes the new last in the range and we continue
+ * looping:
+ */
+ if (last && ops_are_same_pte(last, op)) {
+ last = op;
+ continue;
+ }
+
+ /*
+ * If op is not contiguous with the current range, flush
+ * the current range and start anew:
+ */
+ prealloc_count(job, first, last);
+ first = last = op;
+ }
+
+ /* Flush the remaining range: */
+ prealloc_count(job, first, last);
+
+ /*
+ * Now that we know the needed amount to pre-alloc, throttle on pending
+ * VM_BIND jobs if we already have too much pre-alloc memory in flight
+ */
+ ret = wait_event_interruptible(
+ vm->prealloc_throttle.wait,
+ atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
+ if (ret)
+ return ret;
+
+ atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ return 0;
+}
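The throttle above pairs with a release on the retire path that is not visible in this hunk; a minimal sketch of that assumed counterpart (the helper name is invented, but msm_vma_job_free() is referenced later in this file):

/* Hedged sketch: drop the pre-alloc pages from the in-flight count and
 * wake throttled submitters, presumably from the job free path: */
static void vm_bind_prealloc_release(struct msm_gem_vm *vm,
				     struct msm_vm_bind_job *job)
{
	atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
	wake_up(&vm->prealloc_throttle.wait);
}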
+
+/*
+ * Lock VM and GEM objects
+ */
+static int
+vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
+{
+ int ret;
+
+ /* Lock VM and objects: */
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ const struct msm_vm_bind_op *op = &job->ops[i];
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_UNMAP:
+ ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
+ op->iova,
+ op->obj_offset);
+ break;
+ case MSM_VM_BIND_OP_MAP:
+ case MSM_VM_BIND_OP_MAP_NULL:
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
+ op->iova, op->range,
+ op->obj, op->obj_offset);
+ break;
+ default:
+ /*
+ * lookup_op() should have already thrown an error for
+ * invalid ops
+ */
+ WARN_ON("unreachable");
+ }
+
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Pin GEM objects, ensuring that we have backing pages. Pinning will move
+ * the object to the pinned LRU so that the shrinker knows to first consider
+ * other objects for evicting.
+ */
+static int
+vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
+{
+ struct drm_gem_object *obj;
+
+ /*
+ * First loop, before holding the LRU lock, avoids holding the
+ * LRU lock while calling msm_gem_pin_vma_locked (which could
+ * trigger get_pages())
+ */
+ job_foreach_bo (obj, job) {
+ struct page **pages;
+
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ }
+
+ struct msm_drm_private *priv = job->vm->drm->dev_private;
+
+ /*
+ * A second loop while holding the LRU lock (a) avoids acquiring/dropping
+ * the LRU lock for each individual bo, and (b) avoids holding the LRU
+ * lock while calling msm_gem_pin_vma_locked() (which could trigger
+ * get_pages(), which could trigger reclaim, and if we held the LRU lock
+ * that could deadlock with the shrinker).
+ */
+ mutex_lock(&priv->lru.lock);
+ job_foreach_bo (obj, job)
+ msm_gem_pin_obj_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+
+ job->bos_pinned = true;
+
+ return 0;
+}
+
+/*
+ * Unpin GEM objects. Normally this is done after the bind job is run.
+ */
+static void
+vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
+{
+ struct drm_gem_object *obj;
+
+ if (!job->bos_pinned)
+ return;
+
+ job_foreach_bo (obj, job)
+ msm_gem_unpin_locked(obj);
+
+ job->bos_pinned = false;
+}
+
+/*
+ * Pre-allocate pgtable memory, and translate the VM bind requests into a
+ * sequence of pgtable updates to be applied asynchronously.
+ */
+static int
+vm_bind_job_prepare(struct msm_vm_bind_job *job)
+{
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ struct msm_mmu *mmu = vm->mmu;
+ int ret;
+
+ ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ const struct msm_vm_bind_op *op = &job->ops[i];
+ struct op_arg arg = {
+ .job = job,
+ };
+
+ switch (op->op) {
+ case MSM_VM_BIND_OP_UNMAP:
+ ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
+ op->range);
+ break;
+ case MSM_VM_BIND_OP_MAP:
+ if (op->flags & MSM_VM_BIND_OP_DUMP)
+ arg.flags |= MSM_VMA_DUMP;
+ fallthrough;
+ case MSM_VM_BIND_OP_MAP_NULL:
+ ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
+ op->range, op->obj, op->obj_offset);
+ break;
+ default:
+ /*
+ * lookup_op() should have already thrown an error for
+ * invalid ops
+ */
+ BUG_ON("unreachable");
+ }
+
+ if (ret) {
+ /*
+ * If we've already started modifying the vm, we can't
+ * adequately describe to userspace the intermediate
+ * state the vm is in. So throw up our hands!
+ */
+ if (i > 0)
+ msm_gem_vm_unusable(job->vm);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Attach fences to the GEM objects being bound. This will signify to
+ * the shrinker that they are busy even after dropping the locks (ie.
+ * drm_exec_fini())
+ */
+static void
+vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
+{
+ for (unsigned i = 0; i < job->nr_ops; i++) {
+ struct drm_gem_object *obj = job->ops[i].obj;
+
+ if (!obj)
+ continue;
+
+ dma_resv_add_fence(obj->resv, job->fence,
+ DMA_RESV_USAGE_KERNEL);
+ }
+}
+
+int
+msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_vm_bind *args = data;
+ struct msm_context *ctx = file->driver_priv;
+ struct msm_vm_bind_job *job = NULL;
+ struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_submitqueue *queue;
+ struct msm_syncobj_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
+ struct dma_fence *fence;
+ int out_fence_fd = -1;
+ int ret, nr_bos = 0;
+ unsigned i;
+
+ if (!gpu)
+ return -ENXIO;
+
+ /*
+ * Maybe we could allow just UNMAP ops? OTOH userspace should just
+ * immediately close the device file and all will be torn down.
+ */
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
+
+ /*
+ * Technically, you cannot create a VM_BIND submitqueue in the first
+ * place if you haven't opted in to a VM_BIND context. But it is
+ * cleaner / less confusing to check this case directly.
+ */
+ if (!msm_context_is_vmbind(ctx))
+ return UERR(EINVAL, dev, "context does not support vmbind");
+
+ if (args->flags & ~MSM_VM_BIND_FLAGS)
+ return UERR(EINVAL, dev, "invalid flags");
+
+ queue = msm_submitqueue_get(ctx, args->queue_id);
+ if (!queue)
+ return -ENOENT;
+
+ if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_post_unlock;
+ }
+ }
+
+ job = vm_bind_job_create(dev, file, queue, args->nr_ops);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_post_unlock;
+ }
+
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence) {
+ ret = UERR(EINVAL, dev, "invalid in-fence");
+ goto out_unlock;
+ }
+
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (args->in_syncobjs > 0) {
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(syncobjs_to_reset)) {
+ ret = PTR_ERR(syncobjs_to_reset);
+ goto out_unlock;
+ }
+ }
+
+ if (args->out_syncobjs > 0) {
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_unlock;
+ }
+ }
+
+ ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos);
+ if (ret)
+ goto out_unlock;
+
+ ret = vm_bind_prealloc_count(job);
+ if (ret)
+ goto out_unlock;
+
+ struct drm_exec exec;
+ unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
+ drm_exec_init(&exec, flags, nr_bos + 1);
+
+ ret = vm_bind_job_lock_objects(job, &exec);
+ if (ret)
+ goto out;
+
+ ret = vm_bind_job_pin_objects(job);
+ if (ret)
+ goto out;
+
+ ret = vm_bind_job_prepare(job);
+ if (ret)
+ goto out;
+
+ drm_sched_job_arm(&job->base);
+
+ job->fence = dma_fence_get(&job->base.s_fence->finished);
+
+ if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
+ sync_file = sync_file_create(job->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
+ }
+ }
+
+ if (ret)
+ goto out;
+
+ vm_bind_job_attach_fences(job);
+
+ /*
+ * The job can be free'd (and fence unref'd) at any point after
+ * drm_sched_entity_push_job(), so we need to hold our own ref
+ */
+ fence = dma_fence_get(job->fence);
+
+ drm_sched_entity_push_job(&job->base);
+
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
+
+ dma_fence_put(fence);
+
+out:
+ if (ret)
+ vm_bind_job_unpin_objects(job);
+
+ drm_exec_fini(&exec);
+out_unlock:
+ mutex_unlock(&queue->lock);
+out_post_unlock:
+ if (ret && (out_fence_fd >= 0)) {
+ put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ }
+
+ if (!IS_ERR_OR_NULL(job)) {
+ if (ret)
+ msm_vma_job_free(&job->base);
+ } else {
+ /*
+ * If the submit hasn't yet taken ownership of the queue
+ * then we need to drop the reference ourselves:
+ */
+ msm_submitqueue_put(queue);
+ }
+
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
+ return ret;
}
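For orientation, a hedged userspace sketch of exercising this handler with a single inlined op. The struct fields match those consumed above; the ioctl request macro name and the helper variables (drm_fd, bo_handle, bo_size, vm_bind_queue_id) are assumptions for this sketch:

struct drm_msm_vm_bind req = {
	.flags    = 0,
	.queue_id = vm_bind_queue_id,	/* a queue created with MSM_SUBMITQUEUE_VM_BIND */
	.nr_ops   = 1,
	.op = {				/* nr_ops == 1 uses the inlined op */
		.op         = MSM_VM_BIND_OP_MAP,
		.handle     = bo_handle,
		.obj_offset = 0,
		.iova       = 0x1000000,	/* must be page-aligned */
		.range      = bo_size,		/* must be page-aligned */
	},
};

/* DRM_IOCTL_MSM_VM_BIND is an assumed spelling for the uapi request: */
drmIoctl(drm_fd, DRM_IOCTL_MSM_VM_BIND, &req);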
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3947f7ba1421..c317b25a8162 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -148,7 +148,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p)
{
drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
@@ -219,13 +219,14 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct drm_gem_object *obj, u64 iova, bool full)
+ struct drm_gem_object *obj, u64 iova,
+ bool full, size_t offset, size_t size)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
struct msm_gem_object *msm_obj = to_msm_bo(obj);
/* Don't record write only objects */
- state_bo->size = obj->size;
+ state_bo->size = size;
state_bo->flags = msm_obj->flags;
state_bo->iova = iova;
@@ -236,26 +237,126 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
if (full) {
void *ptr;
- state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
+ state_bo->data = kvmalloc(size, GFP_KERNEL);
if (!state_bo->data)
goto out;
- msm_gem_lock(obj);
ptr = msm_gem_get_vaddr_active(obj);
- msm_gem_unlock(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
- memcpy(state_bo->data, ptr, obj->size);
- msm_gem_put_vaddr(obj);
+ memcpy(state_bo->data, ptr + offset, size);
+ msm_gem_put_vaddr_locked(obj);
}
out:
state->nr_bos++;
}
+static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
+{
+ extern bool rd_full;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_exec exec;
+ struct drm_gpuva *vma;
+ unsigned cnt = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ cnt = 0;
+
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ if (!vma->gem.obj)
+ continue;
+
+ cnt++;
+ drm_exec_lock_obj(&exec, vma->gem.obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
+
+ drm_gpuvm_for_each_va (vma, submit->vm)
+ cnt++;
+
+ state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
+ dump, vma->gem.offset, vma->va.range);
+ }
+
+ drm_exec_fini(&exec);
+ } else {
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (int i = 0; state->bos && i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ msm_gem_lock(obj);
+ msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
+ dump, 0, obj->size);
+ msm_gem_unlock(obj);
+ }
+ }
+}
+
+static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
+{
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ int first;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ state->nr_vm_logs = first;
+ first = 0;
+ } else {
+ state->nr_vm_logs = vm_log_len;
+ }
+
+ state->vm_logs = kmalloc_array(
+ state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
+ for (int i = 0; i < state->nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+
+ state->vm_logs[i] = vm->log[idx];
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
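A worked example of the extraction above, with invented values:

/*
 * With log_shift = 2: vm_log_len = 4, vm_log_mask = 3. If log_idx = 2 and
 * log[2].op != 0, the log has wrapped, so nr_vm_logs = 4 and entries are
 * copied oldest-first as indices 2, 3, 0, 1. If instead log[2].op == 0,
 * only entries 0..1 were ever written: nr_vm_logs = 2, copying from 0.
 */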
+
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
char *comm, char *cmd)
@@ -280,26 +381,18 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (fault_info)
state->fault_info = *fault_info;
- if (submit) {
- int i;
-
- if (state->fault_info.ttbr0) {
- struct msm_gpu_fault_info *info = &state->fault_info;
- struct msm_mmu *mmu = submit->aspace->mmu;
+ if (submit && state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
- msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
- &info->asid);
- msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
- }
-
- state->bos = kcalloc(submit->nr_bos,
- sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
- for (i = 0; state->bos && i < submit->nr_bos; i++) {
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova,
- should_dump(submit, i));
- }
+ if (submit) {
+ crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
+ crashstate_get_bos(state, submit);
}
/* Set the active crash state to be dumped on failure */
@@ -342,7 +435,7 @@ static void retire_submits(struct msm_gpu *gpu);
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
struct task_struct *task;
WARN_ON(!mutex_is_locked(&submit->gpu->lock));
@@ -389,8 +482,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
+ if (submit->vm) {
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+
+ vm->faults++;
+
+ /*
+ * If userspace has opted-in to VM_BIND (and therefore userspace
+ * management of the VM), faults mark the VM as unusable. This
+ * matches Vulkan expectations (Vulkan is the main target for
+ * VM_BIND).
+ */
+ if (!vm->managed)
+ msm_gem_vm_unusable(submit->vm);
+ }
get_comm_cmdline(submit, &comm, &cmd);
@@ -828,10 +933,12 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
}
/* Return a new address space for a msm_drm_private instance */
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed)
{
- struct msm_gem_address_space *aspace = NULL;
+ struct drm_gpuvm *vm = NULL;
+
if (!gpu)
return NULL;
@@ -839,16 +946,16 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
* If the target doesn't support private address spaces then return
* the global one
*/
- if (gpu->funcs->create_private_address_space) {
- aspace = gpu->funcs->create_private_address_space(gpu);
- if (!IS_ERR(aspace))
- aspace->pid = get_pid(task_pid(task));
+ if (gpu->funcs->create_private_vm) {
+ vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
+ if (!IS_ERR(vm))
+ to_msm_vm(vm)->pid = get_pid(task_pid(task));
}
- if (IS_ERR_OR_NULL(aspace))
- aspace = msm_gem_address_space_get(gpu->aspace);
+ if (IS_ERR_OR_NULL(vm))
+ vm = drm_gpuvm_get(gpu->vm);
- return aspace;
+ return vm;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -942,19 +1049,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
-
- gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
-
- if (gpu->aspace == NULL)
- DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
- else if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
+ gpu->vm = gpu->funcs->create_vm(gpu, pdev);
+ if (IS_ERR(gpu->vm)) {
+ ret = PTR_ERR(gpu->vm);
goto fail;
}
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
@@ -998,7 +1101,7 @@ fail:
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -1015,11 +1118,12 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
- if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
- msm_gem_address_space_put(gpu->aspace);
+ if (!IS_ERR_OR_NULL(gpu->vm)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gpu->vm);
}
if (gpu->worker) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 5bf7cd985b9c..b2a96544f92a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -20,9 +20,10 @@
#include "msm_gem.h"
struct msm_gem_submit;
+struct msm_gem_vm_log_entry;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
-struct msm_file_private;
+struct msm_context;
struct msm_gpu_config {
const char *ioname;
@@ -44,9 +45,9 @@ struct msm_gpu_config {
* + z180_gpu
*/
struct msm_gpu_funcs {
- int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
- int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t value, uint32_t len);
int (*hw_init)(struct msm_gpu *gpu);
@@ -78,10 +79,8 @@ struct msm_gpu_funcs {
/* note: gpu_set_freq() can assume that we have been pm_resumed */
void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended);
- struct msm_gem_address_space *(*create_address_space)
- (struct msm_gpu *gpu, struct platform_device *pdev);
- struct msm_gem_address_space *(*create_private_address_space)
- (struct msm_gpu *gpu);
+ struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
+ struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
/**
@@ -236,7 +235,7 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
@@ -341,26 +340,61 @@ struct msm_gpu_perfcntr {
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
- * struct msm_file_private - per-drm_file context
- *
- * @queuelock: synchronizes access to submitqueues list
- * @submitqueues: list of &msm_gpu_submitqueue created by userspace
- * @queueid: counter incremented each time a submitqueue is created,
- * used to assign &msm_gpu_submitqueue.id
- * @aspace: the per-process GPU address-space
- * @ref: reference count
- * @seqno: unique per process seqno
+ * struct msm_context - per-drm_file context
*/
-struct msm_file_private {
+struct msm_context {
+ /** @queuelock: synchronizes access to submitqueues list */
rwlock_t queuelock;
+
+ /** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
struct list_head submitqueues;
+
+ /**
+ * @queueid:
+ *
+ * Counter incremented each time a submitqueue is created, used to
+ * assign &msm_gpu_submitqueue.id
+ */
int queueid;
- struct msm_gem_address_space *aspace;
+
+ /**
+ * @closed: The device file associated with this context has been closed.
+ *
+ * Once the device is closed, any submits that have not been written
+ * to the ring buffer are no-op'd.
+ */
+ bool closed;
+
+ /**
+ * @userspace_managed_vm:
+ *
+ * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
+ * MSM_PARAM_EN_VM_BIND?
+ */
+ bool userspace_managed_vm;
+
+ /**
+ * @vm:
+ *
+ * The per-process GPU address-space. Do not access directly, use
+ * msm_context_vm().
+ */
+ struct drm_gpuvm *vm;
+
+ /** @kref: the reference count */
struct kref ref;
+
+ /**
+ * @seqno:
+ *
+ * A unique per-process sequence number. Used to detect context
+ * switches without relying on keeping a (potentially dangling)
+ * pointer to the previous context.
+ */
int seqno;
/**
- * sysprof:
+ * @sysprof:
*
* The value of MSM_PARAM_SYSPROF set by userspace. This is
* intended to be used by system profiling tools like Mesa's
@@ -378,21 +412,21 @@ struct msm_file_private {
int sysprof;
/**
- * comm: Overridden task comm, see MSM_PARAM_COMM
+ * @comm: Overridden task comm, see MSM_PARAM_COMM
*
* Accessed under msm_gpu::lock
*/
char *comm;
/**
- * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ * @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
*
* Accessed under msm_gpu::lock
*/
char *cmdline;
/**
- * elapsed:
+ * @elapsed:
*
* The total (cumulative) elapsed time GPU was busy with rendering
* from this context in ns.
@@ -400,7 +434,7 @@ struct msm_file_private {
uint64_t elapsed_ns;
/**
- * cycles:
+ * @cycles:
*
* The total (cumulative) GPU cycles elapsed attributed to this
* context.
@@ -408,7 +442,7 @@ struct msm_file_private {
uint64_t cycles;
/**
- * entities:
+ * @entities:
*
* Table of per-priority-level sched entities used by submitqueues
* associated with this &drm_file. Because some userspace apps
@@ -421,7 +455,7 @@ struct msm_file_private {
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
/**
- * ctx_mem:
+ * @ctx_mem:
*
* Total amount of memory of GEM buffers with handles attached for
* this context.
@@ -429,6 +463,24 @@ struct msm_file_private {
atomic64_t ctx_mem;
};
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
+
+/**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND. If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+ return ctx->userspace_managed_vm;
+}
+
/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
@@ -506,13 +558,16 @@ struct msm_gpu_submitqueue {
u32 ring_nr;
int faults;
uint32_t last_fence;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
struct list_head node;
struct idr fence_idr;
struct spinlock idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
+
+ /** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */
+ struct drm_sched_entity _vm_bind_entity[0];
};
struct msm_gpu_state_bo {
@@ -549,6 +604,9 @@ struct msm_gpu_state {
struct msm_gpu_fault_info fault_info;
+ int nr_vm_logs;
+ struct msm_gem_vm_log_entry *vm_logs;
+
int nr_bos;
struct msm_gpu_state_bo *bos;
};
@@ -602,33 +660,32 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p);
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm,
- struct msm_file_private *ctx,
+ struct msm_context *ctx,
u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id);
+void msm_submitqueue_close(struct msm_context *ctx);
void msm_submitqueue_destroy(struct kref *kref);
-int msm_file_private_set_sysprof(struct msm_file_private *ctx,
- struct msm_gpu *gpu, int sysprof);
-void __msm_file_private_destroy(struct kref *kref);
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof);
+void __msm_context_destroy(struct kref *kref);
-static inline void msm_file_private_put(struct msm_file_private *ctx)
+static inline void msm_context_put(struct msm_context *ctx)
{
- kref_put(&ctx->ref, __msm_file_private_destroy);
+ kref_put(&ctx->ref, __msm_context_destroy);
}
-static inline struct msm_file_private *msm_file_private_get(
- struct msm_file_private *ctx)
+static inline struct msm_context *msm_context_get(
+ struct msm_context *ctx)
{
kref_get(&ctx->ref);
return ctx;
@@ -656,8 +713,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed);
void msm_gpu_cleanup(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 7f863282db0d..781bbe5540bd 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -205,6 +205,20 @@ TRACE_EVENT(msm_gpu_preemption_irq,
TP_printk("preempted to %u", __entry->ring_id)
);
+TRACE_EVENT(msm_mmu_prealloc_cleanup,
+ TP_PROTO(u32 count, u32 remaining),
+ TP_ARGS(count, remaining),
+ TP_STRUCT__entry(
+ __field(u32, count)
+ __field(u32, remaining)
+ ),
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->remaining = remaining;
+ ),
+ TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 739ce2c283a4..55c29f49b788 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -6,13 +6,18 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
+#include <linux/kmemleak.h>
#include "msm_drv.h"
+#include "msm_gpu_trace.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
atomic_t pagetables;
+ struct page *prr_page;
+
+ struct kmem_cache *pt_cache;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -26,6 +31,9 @@ struct msm_iommu_pagetable {
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
+
+ /** @root_page_table: Stores the root page table pointer. */
+ void *root_page_table;
};
static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
@@ -93,15 +101,24 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ int ret = 0;
while (size) {
- size_t unmapped, pgsize, count;
+ size_t pgsize, count;
+ ssize_t unmapped;
pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
- if (!unmapped)
- break;
+ if (unmapped <= 0) {
+ ret = -EINVAL;
+ /*
+ * Continue attempting to unmap the remainder of the
+ * range, so we don't end up with some dangling
+ * mapped pages.
+ */
+ unmapped = PAGE_SIZE;
+ }
iova += unmapped;
size -= unmapped;
@@ -109,11 +126,42 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (size == 0) ? 0 : -EINVAL;
+ return ret;
+}
+
+static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+ phys_addr_t phys = page_to_phys(iommu->prr_page);
+ u64 addr = iova;
+
+ while (len) {
+ size_t mapped = 0;
+ size_t size = PAGE_SIZE;
+ int ret;
+
+ ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped);
+
+ /*
+ * map_pages could fail after mapping some of the pages,
+ * so update the counters before error handling.
+ */
+ addr += mapped;
+ len -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
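A hedged usage note: a MAP_NULL bind is expected to arrive here as a NULL sgt via the map op below, for example (iova and length invented for this sketch):

/* Point every page-sized PTE in a 2MB sparse region at the shared PRR page: */
msm_iommu_pagetable_map(mmu, iova, NULL, 0, SZ_2M, IOMMU_READ);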
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -121,10 +169,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
u64 addr = iova;
unsigned int i;
+ if (!sgt)
+ return msm_iommu_pagetable_map_prr(mmu, iova, len, prot);
+
for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
+ if (!len)
+ break;
+
+ if (size <= off) {
+ off -= size;
+ continue;
+ }
+
+ phys += off;
+ size -= off;
+ size = min_t(size_t, size, len);
+ off = 0;
+
while (size) {
size_t pgsize, count, mapped = 0;
int ret;
@@ -140,6 +204,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
phys += mapped;
addr += mapped;
size -= mapped;
+ len -= mapped;
if (ret) {
msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
@@ -162,9 +227,16 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
- if (atomic_dec_return(&iommu->pagetables) == 0)
+ if (atomic_dec_return(&iommu->pagetables) == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+ if (adreno_smmu->set_prr_bit) {
+ adreno_smmu->set_prr_bit(adreno_smmu->cookie, false);
+ __free_page(iommu->prr_page);
+ iommu->prr_page = NULL;
+ }
+ }
+
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
}
@@ -217,7 +289,148 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[
return 0;
}
+static void
+msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len)
+{
+ u64 pt_count;
+
+ /*
+ * L1, L2 and L3 page tables.
+ *
+ * We could optimize L3 allocation by iterating over the sgt and merging
+ * 2M contiguous blocks, but it's simpler to over-provision and return
+ * the pages if they're not used.
+ *
+ * In the v8 / v7-lpae long-descriptor format, each L1 table covers a
+ * 39-bit (512GB) region, each L2 table a 30-bit (1GB) region, and each
+ * L3 table a 21-bit (2MB) region, matching the three terms summed
+ * below.
+ *
+ * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
+ */
+ pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
+ ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
+ ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);
+
+ p->count += pt_count;
+}
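A worked example of the over-provisioning arithmetic, with invented values:

/*
 * iova = 0x10000000, len = SZ_4M, so the range is [0x10000000, 0x10400000):
 *   (1 << 21) term: 0x10400000 - 0x10000000 = 0x400000 >> 21 = 2 L3 tables
 *   (1 << 30) term: 0x40000000 - 0x00000000 = 0x40000000 >> 30 = 1 L2 table
 *   (1 << 39) term: one 512GB span touched = 1 L1 table
 * so p->count grows by 4.
 */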
+
+static struct kmem_cache *
+get_pt_cache(struct msm_mmu *mmu)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ return to_msm_iommu(pagetable->parent)->pt_cache;
+}
+
+static int
+msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+ struct kmem_cache *pt_cache = get_pt_cache(mmu);
+ int ret;
+
+ p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL);
+ if (!p->pages)
+ return -ENOMEM;
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
+ if (ret != p->count) {
+ p->count = ret;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+ struct kmem_cache *pt_cache = get_pt_cache(mmu);
+ uint32_t remaining_pt_count = p->count - p->ptr;
+
+ if (p->count > 0)
+ trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+
+ kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
+ kvfree(p->pages);
+}
+
+/**
+ * msm_iommu_pagetable_alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *
+msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct msm_mmu_prealloc *p = pagetable->base.prealloc;
+ void *page;
+
+ /* Allocation of the root page table happens during init. */
+ if (unlikely(!pagetable->root_page_table)) {
+ struct page *p;
+
+ p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
+ gfp | __GFP_ZERO, get_order(size));
+ page = p ? page_address(p) : NULL;
+ pagetable->root_page_table = page;
+ return page;
+ }
+
+ if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
+ return NULL;
+
+ page = p->pages[p->ptr++];
+ memset(page, 0, size);
+
+ /*
+ * Page table entries don't use virtual addresses, which trips out
+ * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+ * are mixed with other fields, and I fear kmemleak won't detect that
+ * either.
+ *
+ * Let's just ignore memory passed to the page-table driver for now.
+ */
+ kmemleak_ignore(page);
+
+ return page;
+}
+
+/**
+ * msm_iommu_pagetable_free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void
+msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
+{
+ struct msm_iommu_pagetable *pagetable = cookie;
+
+ if (unlikely(pagetable->root_page_table == data)) {
+ free_pages((unsigned long)data, get_order(size));
+ pagetable->root_page_table = NULL;
+ return;
+ }
+
+ kmem_cache_free(get_pt_cache(&pagetable->base), data);
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
+ .prealloc_count = msm_iommu_pagetable_prealloc_count,
+ .prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
+ .prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
.destroy = msm_iommu_pagetable_destroy,
@@ -268,7 +481,18 @@ static const struct iommu_flush_ops tlb_ops = {
static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
+{
+ int pg_shift, bits_per_level;
+
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ /* arm_lpae_iopte is u64: */
+ bits_per_level = pg_shift - ilog2(sizeof(u64));
+
+ return sizeof(u64) << bits_per_level;
+}
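For instance, a worked example assuming a standard 4K granule:

/*
 * __ffs(pgsize_bitmap) = 12 for a 4K granule, so bits_per_level = 12 - 3 = 9
 * (512 u64 entries per table) and tblsz = sizeof(u64) << 9 = 4096 bytes,
 * i.e. one page per table, which is what the "msm-mmu-pt" cache below is
 * sized to.
 */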
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -302,6 +526,36 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &tlb_ops;
+ if (!kernel_managed) {
+ ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
+
+ /*
+ * With userspace managed VM (aka VM_BIND), we need to pre-
+ * allocate pages ahead of time for map/unmap operations,
+ * handing them to io-pgtable via custom alloc/free ops as
+ * needed:
+ */
+ ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
+ ttbr0_cfg.free = msm_iommu_pagetable_free_pt;
+
+ /*
+ * Restrict to single page granules. Otherwise we may run
+ * into a situation where userspace wants to unmap/remap
+ * only a part of a larger block mapping, which is not
+ * possible without unmapping the entire block. Which in
+ * turn could cause faults if the GPU is accessing other
+ * parts of the block mapping.
+ *
+ * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
+ * Remove split on unmap behavior)" this was handled in
+ * io-pgtable-arm. But this apparently does not work
+ * correctly on SMMUv3.
+ */
+ WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
+ ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
+ }
+
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, pagetable);
@@ -321,12 +575,30 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
kfree(pagetable);
return ERR_PTR(ret);
}
+
+ BUG_ON(iommu->prr_page);
+ if (adreno_smmu->set_prr_bit) {
+ /*
+ * We need a zero'd page for two reasons:
+ *
+ * 1) Reserve a known physical address to use when
+ * mapping NULL / sparsely resident regions
+ * 2) Read back zero
+ *
+ * It appears the hw drops writes to the PRR region
+ * on the floor, but reads actually return whatever
+ * is in the PRR page.
+ */
+ iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ adreno_smmu->set_prr_addr(adreno_smmu->cookie,
+ page_to_phys(iommu->prr_page));
+ adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
+ }
}
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->tlb = ttbr1_cfg->tlb;
- pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
@@ -388,11 +660,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
+ WARN_ON(off != 0);
+
/* The arm-smmu driver expects the addresses to be sign extended */
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
@@ -419,6 +694,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
+ kmem_cache_destroy(iommu->pt_cache);
kfree(iommu);
}
@@ -492,6 +768,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
+ if (adreno_smmu && adreno_smmu->cookie) {
+ const struct io_pgtable_cfg *cfg =
+ adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+ size_t tblsz = get_tblsz(cfg);
+
+ iommu->pt_cache =
+ kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
+ }
iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 35d5397e73b4..6889f1c1e721 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
+#include <drm/clients/drm_client_setup.h>
#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
@@ -137,7 +138,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
vbl_work->enable = enable;
vbl_work->priv = priv;
- queue_work(priv->wq, &vbl_work->work);
+ queue_work(priv->kms->wq, &vbl_work->work);
return 0;
}
@@ -176,9 +177,9 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void
return -ENOSYS;
}
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
{
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
@@ -204,17 +205,26 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
return NULL;
}
- aspace = msm_gem_address_space_create(mmu, "mdp_kms",
- 0x1000, 0x100000000 - 0x1000);
- if (IS_ERR(aspace)) {
- dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
+ vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
+ 0x1000, 0x100000000 - 0x1000, true);
+ if (IS_ERR(vm)) {
+ dev_err(mdp_dev, "vm create, error %pe\n", vm);
mmu->funcs->destroy(mmu);
- return aspace;
+ return vm;
}
- msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler);
- return aspace;
+ return vm;
+}
+
+void msm_drm_kms_unregister(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+
+ drm_atomic_helper_shutdown(ddev);
}
void msm_drm_kms_uninit(struct device *dev)
@@ -227,10 +237,17 @@ void msm_drm_kms_uninit(struct device *dev)
BUG_ON(!kms);
+ /* We must cancel and cleanup any pending vblank enable/disable
+ * work before msm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+
+ flush_workqueue(kms->wq);
+
/* clean up event worker threads */
- for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->event_thread[i].worker)
- kthread_destroy_worker(priv->event_thread[i].worker);
+ for (i = 0; i < MAX_CRTCS; i++) {
+ if (kms->event_thread[i].worker)
+ kthread_destroy_worker(kms->event_thread[i].worker);
}
drm_kms_helper_poll_fini(ddev);
@@ -261,7 +278,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- return ret;
+ goto err_msm_uninit;
}
/* Enable normalization of plane zpos */
@@ -283,7 +300,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
struct msm_drm_thread *ev_thread;
/* initialize event thread */
- ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
+ ev_thread = &kms->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
@@ -296,7 +313,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
sched_set_fifo(ev_thread->worker->task);
}
- ret = drm_vblank_init(ddev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, ddev->mode_config.num_crtc);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
@@ -359,3 +376,13 @@ void msm_kms_shutdown(struct platform_device *pdev)
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
+
+void msm_drm_kms_post_init(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+
+ drm_kms_helper_poll_init(ddev);
+ drm_client_setup(ddev, NULL);
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 43b58d052ee6..8a7be7b854de 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -13,6 +13,8 @@
#include "msm_drv.h"
+#ifdef CONFIG_DRM_MSM_KMS
+
#define MAX_PLANE 4
/* As there are different display controller blocks depending on the
@@ -127,10 +129,22 @@ struct msm_pending_timer {
unsigned crtc_idx;
};
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct kthread_worker *worker;
+};
+
struct msm_kms {
const struct msm_kms_funcs *funcs;
struct drm_device *dev;
+ struct hdmi *hdmi;
+
+ struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
+
+ struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
+
/* irq number to be passed on to msm_irq_install */
int irq;
bool irq_requested;
@@ -139,7 +153,7 @@ struct msm_kms {
atomic_t fault_snapshot_capture;
/* mapper-id used to request GEM buffer mapped for scanout: */
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* disp snapshot support */
struct kthread_worker *dump_worker;
@@ -153,6 +167,9 @@ struct msm_kms {
struct mutex commit_lock[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
+
+ struct workqueue_struct *wq;
+ struct msm_drm_thread event_thread[MAX_CRTCS];
};
static inline int msm_kms_init(struct msm_kms *kms,
@@ -165,6 +182,10 @@ static inline int msm_kms_init(struct msm_kms *kms,
kms->funcs = funcs;
+ kms->wq = alloc_ordered_workqueue("msm", 0);
+ if (!kms->wq)
+ return -ENOMEM;
+
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
if (ret) {
@@ -181,6 +202,8 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
+
+ destroy_workqueue(kms->wq);
}
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
@@ -192,6 +215,29 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
+void msm_drm_kms_post_init(struct device *dev);
+void msm_drm_kms_unregister(struct device *dev);
void msm_drm_kms_uninit(struct device *dev);
+#else /* ! CONFIG_DRM_MSM_KMS */
+
+static inline int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+{
+ return -ENODEV;
+}
+
+static inline void msm_drm_kms_post_init(struct device *dev)
+{
+}
+
+static inline void msm_drm_kms_unregister(struct device *dev)
+{
+}
+
+static inline void msm_drm_kms_uninit(struct device *dev)
+{
+}
+
+#endif
+
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 709979fcfab6..1f5fe7811e01 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -16,14 +16,17 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include "msm_mdss.h"
+#include <linux/soc/qcom/ubwc.h>
+
#include "msm_kms.h"
#include <generated/mdss.xml.h>
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
-#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
+struct msm_mdss_data {
+ u32 reg_bus_bw;
+};
struct msm_mdss {
struct device *dev;
@@ -36,7 +39,8 @@ struct msm_mdss {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
- const struct msm_mdss_data *mdss_data;
+ const struct qcom_ubwc_cfg_data *mdss_data;
+ u32 reg_bus_bw;
struct icc_path *mdp_path[2];
u32 num_mdp_paths;
struct icc_path *reg_bus_path;
@@ -165,9 +169,9 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
@@ -180,9 +184,9 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->macrotile_mode)
value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
@@ -198,9 +202,9 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
- const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
- MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit - 13);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
@@ -222,67 +226,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
}
}
-#define MDSS_HW_MAJ_MIN \
- (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)
-
-#define MDSS_HW_MSM8996 0x1007
-#define MDSS_HW_MSM8937 0x100e
-#define MDSS_HW_MSM8953 0x1010
-#define MDSS_HW_MSM8998 0x3000
-#define MDSS_HW_SDM660 0x3002
-#define MDSS_HW_SDM630 0x3003
-
-/*
- * MDP5 platforms use generic qcom,mdp5 compat string, so we have to generate this data
- */
-static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_mdss *mdss)
+static void msm_mdss_setup_ubwc_dec_50(struct msm_mdss *msm_mdss)
{
- struct msm_mdss_data *data;
- u32 hw_rev;
-
- data = devm_kzalloc(mdss->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
- hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);
-
- if (hw_rev == MDSS_HW_MSM8996 ||
- hw_rev == MDSS_HW_MSM8937 ||
- hw_rev == MDSS_HW_MSM8953 ||
- hw_rev == MDSS_HW_MSM8998 ||
- hw_rev == MDSS_HW_SDM660 ||
- hw_rev == MDSS_HW_SDM630) {
- data->ubwc_dec_version = UBWC_1_0;
- data->ubwc_enc_version = UBWC_1_0;
- }
-
- if (hw_rev == MDSS_HW_MSM8996 ||
- hw_rev == MDSS_HW_MSM8998)
- data->highest_bank_bit = 2;
- else
- data->highest_bank_bit = 1;
-
- return data;
-}
-
-const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
-{
- struct msm_mdss *mdss;
+ const struct qcom_ubwc_cfg_data *data = msm_mdss->mdss_data;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
- if (!dev)
- return ERR_PTR(-EINVAL);
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
- mdss = dev_get_drvdata(dev);
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
- /*
- * We could not do it at the probe time, since hw revision register was
- * not readable. Fill data structure now for the MDP5 platforms.
- */
- if (!mdss->mdss_data && mdss->is_mdp5)
- mdss->mdss_data = msm_mdss_generate_mdp5_mdss_data(mdss);
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
- return mdss->mdss_data;
+ writel_relaxed(4, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
@@ -297,12 +256,8 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
for (i = 0; i < msm_mdss->num_mdp_paths; i++)
icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
- if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
- icc_set_bw(msm_mdss->reg_bus_path, 0,
- msm_mdss->mdss_data->reg_bus_bw);
- else
- icc_set_bw(msm_mdss->reg_bus_path, 0,
- DEFAULT_REG_BW);
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ msm_mdss->reg_bus_bw);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
@@ -339,6 +294,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
case UBWC_4_3:
msm_mdss_setup_ubwc_dec_40(msm_mdss);
break;
+ case UBWC_5_0:
+ msm_mdss_setup_ubwc_dec_50(msm_mdss);
+ break;
default:
dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
msm_mdss->mdss_data->ubwc_dec_version);
@@ -438,6 +396,7 @@ static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_d
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
+ const struct msm_mdss_data *mdss_data;
struct msm_mdss *msm_mdss;
int ret;
int irq;
@@ -450,7 +409,15 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
- msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+ msm_mdss->mdss_data = qcom_ubwc_config_get_data();
+ if (IS_ERR(msm_mdss->mdss_data))
+ return ERR_CAST(msm_mdss->mdss_data);
+
+ mdss_data = of_device_get_match_data(&pdev->dev);
+ if (!mdss_data)
+ return ERR_PTR(-EINVAL);
+
+ msm_mdss->reg_bus_bw = mdss_data->reg_bus_bw;
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
@@ -569,205 +536,49 @@ static void mdss_remove(struct platform_device *pdev)
msm_mdss_destroy(mdss);
}
-static const struct msm_mdss_data msm8998_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_1_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data qcm2290_data = {
- /* no UBWC */
- .highest_bank_bit = 0x2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sa8775p_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 4,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0,
- .macrotile_mode = true,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sar2130p_data = {
- .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0,
- .macrotile_mode = 1,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sc7180_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0x1,
- .reg_bus_bw = 76800,
+static const struct msm_mdss_data data_57k = {
+ .reg_bus_bw = 57000,
};
-static const struct msm_mdss_data sc7280_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 1,
- .macrotile_mode = true,
+static const struct msm_mdss_data data_74k = {
.reg_bus_bw = 74000,
};
-static const struct msm_mdss_data sc8180x_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_3_0,
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sc8280xp_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sdm670_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sdm845_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6350_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm7150_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
+static const struct msm_mdss_data data_76k8 = {
.reg_bus_bw = 76800,
};
-static const struct msm_mdss_data sm8150_data = {
- .ubwc_enc_version = UBWC_3_0,
- .ubwc_dec_version = UBWC_3_0,
- .highest_bank_bit = 2,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6115_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_2_0,
- .ubwc_swizzle = 7,
- .ubwc_bank_spread = true,
- .highest_bank_bit = 0x1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm6125_data = {
- .ubwc_enc_version = UBWC_1_0,
- .ubwc_dec_version = UBWC_3_0,
- .ubwc_swizzle = 1,
- .highest_bank_bit = 1,
-};
-
-static const struct msm_mdss_data sm6150_data = {
- .ubwc_enc_version = UBWC_2_0,
- .ubwc_dec_version = UBWC_2_0,
- .highest_bank_bit = 1,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm8250_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 76800,
-};
-
-static const struct msm_mdss_data sm8350_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_0,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 74000,
-};
-
-static const struct msm_mdss_data sm8550_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- .reg_bus_bw = 57000,
-};
-
-static const struct msm_mdss_data x1e80100_data = {
- .ubwc_enc_version = UBWC_4_0,
- .ubwc_dec_version = UBWC_4_3,
- .ubwc_swizzle = 6,
- .ubwc_bank_spread = true,
- /* TODO: highest_bank_bit = 2 for LP_DDR4 */
- .highest_bank_bit = 3,
- .macrotile_mode = true,
- /* TODO: Add reg_bus_bw with real value */
+static const struct msm_mdss_data data_153k6 = {
+ .reg_bus_bw = 153600,
};
static const struct of_device_id mdss_dt_match[] = {
- { .compatible = "qcom,mdss" },
- { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
- { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
- { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
- { .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data },
- { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
- { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
- { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
- { .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
- { .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
- { .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
- { .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
- { .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
- { .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
- { .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
- { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
- { .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
- { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
- { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
- { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
- { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
- { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
- { .compatible = "qcom,x1e80100-mdss", .data = &x1e80100_data},
+ { .compatible = "qcom,mdss", .data = &data_153k6 },
+ { .compatible = "qcom,msm8998-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sa8775p-mdss", .data = &data_74k },
+ { .compatible = "qcom,sar2130p-mdss", .data = &data_74k },
+ { .compatible = "qcom,sdm670-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sdm845-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc7180-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc7280-mdss", .data = &data_74k },
+ { .compatible = "qcom,sc8180x-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sc8280xp-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6115-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6125-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6350-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm6375-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm7150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8150-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8250-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,sm8350-mdss", .data = &data_74k },
+ { .compatible = "qcom,sm8450-mdss", .data = &data_74k },
+ { .compatible = "qcom,sm8550-mdss", .data = &data_57k },
+ { .compatible = "qcom,sm8650-mdss", .data = &data_57k },
+ { .compatible = "qcom,sm8750-mdss", .data = &data_57k },
+ /* TODO: x1e8: Add reg_bus_bw with real value */
+ { .compatible = "qcom,x1e80100-mdss", .data = &data_153k6 },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
deleted file mode 100644
index 14dc53704314..000000000000
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#ifndef __MSM_MDSS_H__
-#define __MSM_MDSS_H__
-
-struct msm_mdss_data {
- u32 ubwc_enc_version;
- /* can be read from register 0x58 */
- u32 ubwc_dec_version;
- u32 ubwc_swizzle;
- u32 highest_bank_bit;
- bool ubwc_bank_spread;
- bool macrotile_mode;
- u32 reg_bus_bw;
-};
-
-#define UBWC_1_0 0x10000000
-#define UBWC_2_0 0x20000000
-#define UBWC_3_0 0x30000000
-#define UBWC_4_0 0x40000000
-#define UBWC_4_3 0x40030000
-
-const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev);
-
-#endif /* __MSM_MDSS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 0c694907140d..8915662fbd4d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -9,10 +9,18 @@
#include <linux/iommu.h>
+struct msm_mmu_prealloc;
+struct msm_mmu;
+struct msm_gpu;
+
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
+ void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len);
+ int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
+ void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- size_t len, int prot);
+ size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
void (*set_stall)(struct msm_mmu *mmu, bool enable);
@@ -24,12 +32,38 @@ enum msm_mmu_type {
MSM_MMU_IOMMU_PAGETABLE,
};
+/**
+ * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
+ */
+struct msm_mmu_prealloc {
+ /** @count: Number of pages reserved. */
+ uint32_t count;
+ /** @ptr: Index of first unused page in @pages */
+ uint32_t ptr;
+ /**
+ * @pages: Array of pages preallocated for MMU table updates.
+ *
+ * After a VM operation, there might be free pages remaining in this
+ * array (since the amount allocated is a worst-case). These are
+ * returned to the pt_cache at mmu->prealloc_cleanup().
+ */
+ void **pages;
+};
+
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
enum msm_mmu_type type;
+
+ /**
+ * @prealloc: pre-allocated pages for pgtable
+ *
+ * Set while a VM_BIND job is running, serialized under
+ * msm_gem_vm::mmu_lock.
+ */
+ struct msm_mmu_prealloc *prealloc;
};
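A minimal usage sketch of how these reservations are intended to flow through the prealloc_count()/prealloc_allocate()/prealloc_cleanup() hooks added to msm_mmu_funcs above, assuming the caller serializes under msm_gem_vm::mmu_lock (the wrapper name is hypothetical):

    /* Hypothetical sketch: reserve worst-case pages up front so the
     * pagetable update itself cannot fail on allocation, then return
     * whatever was left unused to the cache. */
    static int vm_op_with_prealloc(struct msm_mmu *mmu, uint64_t iova,
                                   struct sg_table *sgt, size_t len, int prot)
    {
            struct msm_mmu_prealloc p = {};
            int ret;

            mmu->funcs->prealloc_count(mmu, &p, iova, len);
            ret = mmu->funcs->prealloc_allocate(mmu, &p);
            if (ret)
                    return ret;

            mmu->prealloc = &p;
            ret = mmu->funcs->map(mmu, iova, sgt, 0, len, prot);
            mmu->prealloc = NULL;

            mmu->funcs->prealloc_cleanup(mmu, &p);
            return ret;
    }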
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -51,7 +85,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 39138e190cb9..54493a94dcb7 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -308,21 +308,11 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
priv->hangrd = NULL;
}
-static void snapshot_buf(struct msm_rd_state *rd,
- struct msm_gem_submit *submit, int idx,
- uint64_t iova, uint32_t size, bool full)
+static void snapshot_buf(struct msm_rd_state *rd, struct drm_gem_object *obj,
+ uint64_t iova, bool full, size_t offset, size_t size)
{
- struct drm_gem_object *obj = submit->bos[idx].obj;
- unsigned offset = 0;
const char *buf;
- if (iova) {
- offset = iova - submit->bos[idx].iova;
- } else {
- iova = submit->bos[idx].iova;
- size = obj->size;
- }
-
/*
* Always write the GPUADDR header so we can get a complete list of all the
* buffers in the cmd
@@ -333,10 +323,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (!full)
return;
- /* But only dump the contents of buffers marked READ */
- if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
- return;
-
buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
@@ -352,6 +338,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
+ extern bool rd_full;
struct task_struct *task;
char msg[256];
int i, n;
@@ -385,16 +372,43 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_gpuva *vma;
- for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t szd = submit->cmd[i].size; /* in dwords */
+ drm_gpuvm_resv_assert_held(submit->vm);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ snapshot_buf(rd, vma->gem.obj, vma->va.addr, dump,
+ vma->gem.offset, vma->va.range);
+ }
+
+ } else {
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ snapshot_buf(rd, obj, submit->bos[i].iova, dump, 0, obj->size);
+ }
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+ int idx = submit->cmd[i].idx;
+ bool dump = rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!dump) {
+ struct drm_gem_object *obj = submit->bos[idx].obj;
+ size_t offset = submit->cmd[i].iova - submit->bos[idx].iova;
- /* snapshot cmdstream bo's (if we haven't already): */
- if (!should_dump(submit, i)) {
- snapshot_buf(rd, submit, submit->cmd[i].idx,
- submit->cmd[i].iova, szd * 4, true);
+ snapshot_buf(rd, obj, submit->cmd[i].iova, true,
+ offset, szd * 4);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 89dce15eed3b..b2f612e5dc79 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -17,6 +17,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned nr_cmds = submit->nr_cmds;
int i;
msm_fence_init(submit->hw_fence, fctx);
@@ -36,8 +37,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
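+	/*
+	 * Note (assumed rationale): zeroing nr_cmds below makes
+	 * msm_gpu_submit() skip the cmdstream and emit only the fence
+	 * write for a closed context; the saved count is restored
+	 * afterwards so later retire/dump paths still see the real cmds.
+	 */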
+ if (submit->queue->ctx->closed)
+ submit->nr_cmds = 0;
+
msm_gpu_submit(gpu, submit);
+ submit->nr_cmds = nr_cmds;
+
mutex_unlock(&gpu->lock);
return dma_fence_get(submit->hw_fence);
@@ -84,7 +90,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
- gpu->aspace, &ring->bo, &ring->iova);
+ gpu->vm, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
@@ -131,7 +137,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
msm_fence_context_free(ring->fctx);
- msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
+ msm_gem_kernel_put(ring->bo, ring->gpu->vm);
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 7fed1de63b5d..8617a82cd6b3 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -7,8 +7,7 @@
#include "msm_gpu.h"
-int msm_file_private_set_sysprof(struct msm_file_private *ctx,
- struct msm_gpu *gpu, int sysprof)
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof)
{
/*
* Since pm_runtime and sysprof_active are both refcounts, we
@@ -46,10 +45,10 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
return 0;
}
-void __msm_file_private_destroy(struct kref *kref)
+void __msm_context_destroy(struct kref *kref)
{
- struct msm_file_private *ctx = container_of(kref,
- struct msm_file_private, ref);
+ struct msm_context *ctx = container_of(kref,
+ struct msm_context, ref);
int i;
for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
@@ -60,7 +59,7 @@ void __msm_file_private_destroy(struct kref *kref)
kfree(ctx->entities[i]);
}
- msm_gem_address_space_put(ctx->aspace);
+ drm_gpuvm_put(ctx->vm);
kfree(ctx->comm);
kfree(ctx->cmdline);
kfree(ctx);
@@ -73,12 +72,15 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
- msm_file_private_put(queue->ctx);
+ if (queue->entity == &queue->_vm_bind_entity[0])
+ drm_sched_entity_destroy(queue->entity);
+
+ msm_context_put(queue->ctx);
kfree(queue);
}
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id)
{
struct msm_gpu_submitqueue *entry;
@@ -101,9 +103,9 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
return NULL;
}
-void msm_submitqueue_close(struct msm_file_private *ctx)
+void msm_submitqueue_close(struct msm_context *ctx)
{
- struct msm_gpu_submitqueue *entry, *tmp;
+ struct msm_gpu_submitqueue *queue, *tmp;
if (!ctx)
return;
@@ -112,14 +114,21 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
- list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
- list_del(&entry->node);
- msm_submitqueue_put(entry);
+ list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
+ if (queue->entity == &queue->_vm_bind_entity[0])
+ drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ list_del(&queue->node);
+ msm_submitqueue_put(queue);
}
+
+ if (!ctx->vm)
+ return;
+
+ msm_gem_vm_close(ctx->vm);
}
static struct drm_sched_entity *
-get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+get_sched_entity(struct msm_context *ctx, struct msm_ringbuffer *ring,
unsigned ring_nr, enum drm_sched_priority sched_prio)
{
static DEFINE_MUTEX(entity_lock);
@@ -155,14 +164,12 @@ get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
return ctx->entities[idx];
}
-int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
enum drm_sched_priority sched_prio;
- extern int enable_preemption;
- bool preemption_supported;
unsigned ring_nr;
int ret;
@@ -172,26 +179,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
if (!priv->gpu)
return -ENODEV;
- preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+ if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+ unsigned sz;
- if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
- return -EINVAL;
+ /* Not allowed for kernel managed VMs (i.e. kernel allocates VA) */
+ if (!msm_context_is_vmbind(ctx))
+ return -EINVAL;
- ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
- if (ret)
- return ret;
+ if (prio)
+ return -EINVAL;
+
+ sz = struct_size(queue, _vm_bind_entity, 1);
+ queue = kzalloc(sz, GFP_KERNEL);
+ } else {
+ extern int enable_preemption;
+ bool preemption_supported =
+ priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+ if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+ return -EINVAL;
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+ if (ret)
+ return ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ }
if (!queue)
return -ENOMEM;
kref_init(&queue->ref);
queue->flags = flags;
- queue->ring_nr = ring_nr;
- queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
- ring_nr, sched_prio);
+ if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+ struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;
+
+ queue->entity = &queue->_vm_bind_entity[0];
+
+ drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
+ &sched, 1, NULL);
+ } else {
+ queue->ring_nr = ring_nr;
+
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ }
+
if (IS_ERR(queue->entity)) {
ret = PTR_ERR(queue->entity);
kfree(queue);
@@ -200,7 +234,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
write_lock(&ctx->queuelock);
- queue->ctx = msm_file_private_get(ctx);
+ queue->ctx = msm_context_get(ctx);
queue->id = ctx->queueid++;
if (id)
@@ -221,7 +255,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
* Create the default submit-queue (id==0), used for backwards compatibility
* for userspace that pre-dates the introduction of submitqueues.
*/
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
int default_prio, max_priority;
@@ -261,7 +295,7 @@ static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
return ret ? -EFAULT : 0;
}
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args)
{
struct msm_gpu_submitqueue *queue;
@@ -282,7 +316,7 @@ int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
return ret;
}
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
diff --git a/drivers/gpu/drm/msm/msm_syncobj.c b/drivers/gpu/drm/msm/msm_syncobj.c
new file mode 100644
index 000000000000..4baa9f522c54
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_syncobj.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Google, Inc */
+
+#include "drm/drm_drv.h"
+
+#include "msm_drv.h"
+#include "msm_syncobj.h"
+
+struct drm_syncobj **
+msm_syncobj_parse_deps(struct drm_device *dev,
+ struct drm_sched_job *job,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SYNCOBJ_FLAGS) {
+ ret = UERR(EINVAL, dev, "invalid syncobj flags: %x", syncobj_desc.flags);
+ break;
+ }
+
+ ret = drm_sched_job_add_syncobj_dependency(job, file,
+ syncobj_desc.handle,
+ syncobj_desc.point);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = UERR(EINVAL, dev, "invalid syncobj handle: %u", i);
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
+void
+msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+struct msm_syncobj_post_dep *
+msm_syncobj_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_syncobj_post_dep *post_deps;
+ struct drm_msm_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = UERR(EINVAL, dev, "invalid syncobj flags");
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
+ break;
+ }
+
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = UERR(EINVAL, dev, "invalid syncobj handle");
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ dma_fence_chain_free(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
+void
+msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/msm/msm_syncobj.h b/drivers/gpu/drm/msm/msm_syncobj.h
new file mode 100644
index 000000000000..bcaa15d01da0
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_syncobj.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Google, Inc */
+
+#ifndef __MSM_GEM_SYNCOBJ_H__
+#define __MSM_GEM_SYNCOBJ_H__
+
+#include "drm/drm_device.h"
+#include "drm/drm_syncobj.h"
+#include "drm/gpu_scheduler.h"
+
+struct msm_syncobj_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+struct drm_syncobj **
+msm_syncobj_parse_deps(struct drm_device *dev,
+ struct drm_sched_job *job,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride);
+
+void msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs);
+
+struct msm_syncobj_post_dep *
+msm_syncobj_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride);
+
+void msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence);
+
+#endif /* __MSM_GEM_SYNCOBJ_H__ */
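Taken together, these helpers are meant to bracket a scheduler job submission; a minimal sketch of the intended ordering, where the args fields and the wrapper name are hypothetical:

    /* Hypothetical submit-path sketch (error unwinding elided). */
    static int submit_with_syncobjs(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct drm_sched_job *job,
                                    struct my_args *args,
                                    struct dma_fence *fence)
    {
            struct drm_syncobj **in;
            struct msm_syncobj_post_dep *out;

            in = msm_syncobj_parse_deps(dev, job, file, args->in_syncobjs,
                                        args->nr_in_syncobjs,
                                        args->syncobj_stride);
            if (IS_ERR(in))
                    return PTR_ERR(in);

            out = msm_syncobj_parse_post_deps(dev, file, args->out_syncobjs,
                                              args->nr_out_syncobjs,
                                              args->syncobj_stride);
            if (IS_ERR(out))
                    return PTR_ERR(out);

            /* ... push the job and obtain its finished fence ... */

            msm_syncobj_reset(in, args->nr_in_syncobjs);
            msm_syncobj_process_post_deps(out, args->nr_out_syncobjs, fence);
            return 0;
    }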
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 2db425abf0f3..d860fd94feae 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -5,6 +5,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<import file="freedreno_copyright.xml"/>
<import file="adreno/adreno_common.xml"/>
<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+<import file="adreno/a7xx_enums.xml"/>
+<import file="adreno/a6xx_perfcntrs.xml"/>
+<import file="adreno/a7xx_perfcntrs.xml"/>
+<import file="adreno/a6xx_descriptors.xml"/>
<!--
Each register that is actually being used by driver should have "usage" defined,
@@ -20,2205 +25,6 @@ is either overwritten by renderpass/blit (ib2) or not used if not overwritten
by a particular renderpass/blit.
-->
-<!-- these might be same as a5xx -->
-<enum name="a6xx_tile_mode">
- <value name="TILE6_LINEAR" value="0"/>
- <value name="TILE6_2" value="2"/>
- <value name="TILE6_3" value="3"/>
-</enum>
-
-<enum name="a6xx_format">
- <value value="0x02" name="FMT6_A8_UNORM"/>
- <value value="0x03" name="FMT6_8_UNORM"/>
- <value value="0x04" name="FMT6_8_SNORM"/>
- <value value="0x05" name="FMT6_8_UINT"/>
- <value value="0x06" name="FMT6_8_SINT"/>
-
- <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
- <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
- <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
- <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
-
- <value value="0x0f" name="FMT6_8_8_UNORM"/>
- <value value="0x10" name="FMT6_8_8_SNORM"/>
- <value value="0x11" name="FMT6_8_8_UINT"/>
- <value value="0x12" name="FMT6_8_8_SINT"/>
- <value value="0x13" name="FMT6_L8_A8_UNORM"/>
-
- <value value="0x15" name="FMT6_16_UNORM"/>
- <value value="0x16" name="FMT6_16_SNORM"/>
- <value value="0x17" name="FMT6_16_FLOAT"/>
- <value value="0x18" name="FMT6_16_UINT"/>
- <value value="0x19" name="FMT6_16_SINT"/>
-
- <value value="0x21" name="FMT6_8_8_8_UNORM"/>
- <value value="0x22" name="FMT6_8_8_8_SNORM"/>
- <value value="0x23" name="FMT6_8_8_8_UINT"/>
- <value value="0x24" name="FMT6_8_8_8_SINT"/>
-
- <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
- <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
- <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
- <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
- <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
-
- <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
-
- <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
- <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
- <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
- <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
- <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
-
- <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
-
- <value value="0x43" name="FMT6_16_16_UNORM"/>
- <value value="0x44" name="FMT6_16_16_SNORM"/>
- <value value="0x45" name="FMT6_16_16_FLOAT"/>
- <value value="0x46" name="FMT6_16_16_UINT"/>
- <value value="0x47" name="FMT6_16_16_SINT"/>
-
- <value value="0x48" name="FMT6_32_UNORM"/>
- <value value="0x49" name="FMT6_32_SNORM"/>
- <value value="0x4a" name="FMT6_32_FLOAT"/>
- <value value="0x4b" name="FMT6_32_UINT"/>
- <value value="0x4c" name="FMT6_32_SINT"/>
- <value value="0x4d" name="FMT6_32_FIXED"/>
-
- <value value="0x58" name="FMT6_16_16_16_UNORM"/>
- <value value="0x59" name="FMT6_16_16_16_SNORM"/>
- <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
- <value value="0x5b" name="FMT6_16_16_16_UINT"/>
- <value value="0x5c" name="FMT6_16_16_16_SINT"/>
-
- <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
- <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
- <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
- <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
- <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
-
- <value value="0x65" name="FMT6_32_32_UNORM"/>
- <value value="0x66" name="FMT6_32_32_SNORM"/>
- <value value="0x67" name="FMT6_32_32_FLOAT"/>
- <value value="0x68" name="FMT6_32_32_UINT"/>
- <value value="0x69" name="FMT6_32_32_SINT"/>
- <value value="0x6a" name="FMT6_32_32_FIXED"/>
-
- <value value="0x70" name="FMT6_32_32_32_UNORM"/>
- <value value="0x71" name="FMT6_32_32_32_SNORM"/>
- <value value="0x72" name="FMT6_32_32_32_UINT"/>
- <value value="0x73" name="FMT6_32_32_32_SINT"/>
- <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
- <value value="0x75" name="FMT6_32_32_32_FIXED"/>
-
- <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
- <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
- <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
- <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
- <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
- <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
-
- <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
- <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
- <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
- <value value="0x8f" name="FMT6_NV21"/>
- <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
-
- <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
-
- <!-- Note: tiling/UBWC for these may be different from equivalent formats
- For example FMT6_NV12_Y is not compatible with FMT6_8_UNORM
- -->
- <value value="0x94" name="FMT6_NV12_Y"/>
- <value value="0x95" name="FMT6_NV12_UV"/>
- <value value="0x96" name="FMT6_NV12_VU"/>
- <value value="0x97" name="FMT6_NV12_4R"/>
- <value value="0x98" name="FMT6_NV12_4R_Y"/>
- <value value="0x99" name="FMT6_NV12_4R_UV"/>
- <value value="0x9a" name="FMT6_P010"/>
- <value value="0x9b" name="FMT6_P010_Y"/>
- <value value="0x9c" name="FMT6_P010_UV"/>
- <value value="0x9d" name="FMT6_TP10"/>
- <value value="0x9e" name="FMT6_TP10_Y"/>
- <value value="0x9f" name="FMT6_TP10_UV"/>
-
- <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
-
- <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
- <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
- <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
- <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
- <value value="0xaf" name="FMT6_ETC1"/>
- <value value="0xb0" name="FMT6_ETC2_RGB8"/>
- <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
- <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
- <value value="0xb3" name="FMT6_DXT1"/>
- <value value="0xb4" name="FMT6_DXT3"/>
- <value value="0xb5" name="FMT6_DXT5"/>
- <value value="0xb7" name="FMT6_RGTC1_UNORM"/>
- <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
- <value value="0xbb" name="FMT6_RGTC2_UNORM"/>
- <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
- <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
- <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
- <value value="0xc0" name="FMT6_BPTC"/>
- <value value="0xc1" name="FMT6_ASTC_4x4"/>
- <value value="0xc2" name="FMT6_ASTC_5x4"/>
- <value value="0xc3" name="FMT6_ASTC_5x5"/>
- <value value="0xc4" name="FMT6_ASTC_6x5"/>
- <value value="0xc5" name="FMT6_ASTC_6x6"/>
- <value value="0xc6" name="FMT6_ASTC_8x5"/>
- <value value="0xc7" name="FMT6_ASTC_8x6"/>
- <value value="0xc8" name="FMT6_ASTC_8x8"/>
- <value value="0xc9" name="FMT6_ASTC_10x5"/>
- <value value="0xca" name="FMT6_ASTC_10x6"/>
- <value value="0xcb" name="FMT6_ASTC_10x8"/>
- <value value="0xcc" name="FMT6_ASTC_10x10"/>
- <value value="0xcd" name="FMT6_ASTC_12x10"/>
- <value value="0xce" name="FMT6_ASTC_12x12"/>
-
- <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
- <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
-
- <!-- Not a hw enum, used internally in driver -->
- <value value="0xff" name="FMT6_NONE"/>
-
-</enum>
-
-<!-- probably same as a5xx -->
-<enum name="a6xx_polygon_mode">
- <value name="POLYMODE6_POINTS" value="1"/>
- <value name="POLYMODE6_LINES" value="2"/>
- <value name="POLYMODE6_TRIANGLES" value="3"/>
-</enum>
-
-<enum name="a6xx_depth_format">
- <value name="DEPTH6_NONE" value="0"/>
- <value name="DEPTH6_16" value="1"/>
- <value name="DEPTH6_24_8" value="2"/>
- <value name="DEPTH6_32" value="4"/>
-</enum>
-
-<bitset name="a6x_cp_protect" inline="yes">
- <bitfield name="BASE_ADDR" low="0" high="17"/>
- <bitfield name="MASK_LEN" low="18" high="30"/>
- <bitfield name="READ" pos="31" type="boolean"/>
-</bitset>
-
-<enum name="a6xx_shader_id">
- <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
- <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
- <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
- <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
- <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
- <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
- <value value="0x29" name="A6XX_SP_INST_DATA"/>
- <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
- <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
- <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
- <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
- <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
- <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
- <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
- <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
- <value value="0x32" name="A6XX_SP_UAV_DATA"/>
- <value value="0x33" name="A6XX_SP_INST_TAG"/>
- <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
- <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
- <value value="0x36" name="A6XX_SP_SMO_TAG"/>
- <value value="0x37" name="A6XX_SP_STATE_DATA"/>
- <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
- <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
- <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
- <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
- <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
- <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
- <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
- <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
- <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
- <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
- <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
- <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
- <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
- <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
- <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
- <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
- <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
- <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
- <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
- <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
- <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
- <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
- <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
- <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
- <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
-</enum>
-
-<enum name="a7xx_statetype_id">
- <value value="0" name="A7XX_TP0_NCTX_REG"/>
- <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
- <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
- <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
- <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
- <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
- <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
- <value value="9" name="A7XX_TP0_TMO_DATA"/>
- <value value="10" name="A7XX_TP0_SMO_DATA"/>
- <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
- <value value="32" name="A7XX_SP_NCTX_REG"/>
- <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
- <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
- <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
- <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
- <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
- <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
- <value value="39" name="A7XX_SP_INST_DATA"/>
- <value value="40" name="A7XX_SP_INST_DATA_1"/>
- <value value="41" name="A7XX_SP_LB_0_DATA"/>
- <value value="42" name="A7XX_SP_LB_1_DATA"/>
- <value value="43" name="A7XX_SP_LB_2_DATA"/>
- <value value="44" name="A7XX_SP_LB_3_DATA"/>
- <value value="45" name="A7XX_SP_LB_4_DATA"/>
- <value value="46" name="A7XX_SP_LB_5_DATA"/>
- <value value="47" name="A7XX_SP_LB_6_DATA"/>
- <value value="48" name="A7XX_SP_LB_7_DATA"/>
- <value value="49" name="A7XX_SP_CB_RAM"/>
- <value value="50" name="A7XX_SP_LB_13_DATA"/>
- <value value="51" name="A7XX_SP_LB_14_DATA"/>
- <value value="52" name="A7XX_SP_INST_TAG"/>
- <value value="53" name="A7XX_SP_INST_DATA_2"/>
- <value value="54" name="A7XX_SP_TMO_TAG"/>
- <value value="55" name="A7XX_SP_SMO_TAG"/>
- <value value="56" name="A7XX_SP_STATE_DATA"/>
- <value value="57" name="A7XX_SP_HWAVE_RAM"/>
- <value value="58" name="A7XX_SP_L0_INST_BUF"/>
- <value value="59" name="A7XX_SP_LB_8_DATA"/>
- <value value="60" name="A7XX_SP_LB_9_DATA"/>
- <value value="61" name="A7XX_SP_LB_10_DATA"/>
- <value value="62" name="A7XX_SP_LB_11_DATA"/>
- <value value="63" name="A7XX_SP_LB_12_DATA"/>
- <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
- <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
- <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
- <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
- <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
- <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
- <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
- <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
- <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
- <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
- <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
- <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
- <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
- <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
- <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
- <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
- <value value="82" name="A7XX_HLSQ_INST_RAM"/>
- <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
- <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
- <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
- <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
- <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
- <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
- <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
- <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
- <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
- <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
- <value value="93" name="A7XX_HLSQ_STPROC_META"/>
- <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
- <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
- <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
- <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
- <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
- <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
-</enum>
-
-<enum name="a6xx_debugbus_id">
- <value value="0x1" name="A6XX_DBGBUS_CP"/>
- <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
- <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
- <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
- <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
- <value value="0x6" name="A6XX_DBGBUS_DPM"/>
- <value value="0x7" name="A6XX_DBGBUS_TESS"/>
- <value value="0x8" name="A6XX_DBGBUS_PC"/>
- <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
- <value value="0xa" name="A6XX_DBGBUS_VPC"/>
- <value value="0xb" name="A6XX_DBGBUS_TSE"/>
- <value value="0xc" name="A6XX_DBGBUS_RAS"/>
- <value value="0xd" name="A6XX_DBGBUS_VSC"/>
- <value value="0xe" name="A6XX_DBGBUS_COM"/>
- <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
- <value value="0x11" name="A6XX_DBGBUS_A2D"/>
- <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
- <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
- <value value="0x14" name="A6XX_DBGBUS_RBP"/>
- <value value="0x15" name="A6XX_DBGBUS_DCS"/>
- <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
- <value value="0x17" name="A6XX_DBGBUS_CX"/>
- <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
- <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
- <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
- <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
- <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
- <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
- <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
- <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
- <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
- <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
- <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
- <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
- <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
- <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
- <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
- <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
- <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
- <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
- <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
- <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
- <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
- <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
- <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
- <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
- <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
- <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
- <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
- <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
- <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
- <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
- <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
- <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
- <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
- <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
-</enum>
-
-<enum name="a7xx_state_location">
- <value value="0" name="A7XX_HLSQ_STATE"/>
- <value value="1" name="A7XX_HLSQ_DP"/>
- <value value="2" name="A7XX_SP_TOP"/>
- <value value="3" name="A7XX_USPTP"/>
- <value value="4" name="A7XX_HLSQ_DP_STR"/>
-</enum>
-
-<enum name="a7xx_pipe">
- <value value="0" name="A7XX_PIPE_NONE"/>
- <value value="1" name="A7XX_PIPE_BR"/>
- <value value="2" name="A7XX_PIPE_BV"/>
- <value value="3" name="A7XX_PIPE_LPAC"/>
-</enum>
-
-<enum name="a7xx_cluster">
- <value value="0" name="A7XX_CLUSTER_NONE"/>
- <value value="1" name="A7XX_CLUSTER_FE"/>
- <value value="2" name="A7XX_CLUSTER_SP_VS"/>
- <value value="3" name="A7XX_CLUSTER_PC_VS"/>
- <value value="4" name="A7XX_CLUSTER_GRAS"/>
- <value value="5" name="A7XX_CLUSTER_SP_PS"/>
- <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
- <value value="7" name="A7XX_CLUSTER_PS"/>
-</enum>
-
-<enum name="a7xx_debugbus_id">
- <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
- <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
- <value value="3" name="A7XX_DBGBUS_RBBM"/>
- <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
- <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
- <value value="7" name="A7XX_DBGBUS_HLSQ"/>
- <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
- <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
- <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
- <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
- <value value="17" name="A7XX_DBGBUS_PC_BR"/>
- <value value="18" name="A7XX_DBGBUS_PC_BV"/>
- <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
- <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
- <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
- <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
- <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
- <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
- <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
- <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
- <value value="37" name="A7XX_DBGBUS_VSC"/>
- <value value="39" name="A7XX_DBGBUS_COM_0"/>
- <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
- <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
- <value value="47" name="A7XX_DBGBUS_UFC_0"/>
- <value value="48" name="A7XX_DBGBUS_UFC_1"/>
- <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
- <value value="59" name="A7XX_DBGBUS_DBGC"/>
- <value value="60" name="A7XX_DBGBUS_CX"/>
- <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
- <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
- <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
- <value value="66" name="A7XX_DBGBUS_LARC"/>
- <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
- <value value="70" name="A7XX_DBGBUS_RB_0"/>
- <value value="71" name="A7XX_DBGBUS_RB_1"/>
- <value value="72" name="A7XX_DBGBUS_RB_2"/>
- <value value="73" name="A7XX_DBGBUS_RB_3"/>
- <value value="74" name="A7XX_DBGBUS_RB_4"/>
- <value value="75" name="A7XX_DBGBUS_RB_5"/>
- <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
- <value value="106" name="A7XX_DBGBUS_CCU_0"/>
- <value value="107" name="A7XX_DBGBUS_CCU_1"/>
- <value value="108" name="A7XX_DBGBUS_CCU_2"/>
- <value value="109" name="A7XX_DBGBUS_CCU_3"/>
- <value value="110" name="A7XX_DBGBUS_CCU_4"/>
- <value value="111" name="A7XX_DBGBUS_CCU_5"/>
- <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
- <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
- <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
- <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
- <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
- <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
- <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
- <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
- <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
- <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
- <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
- <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
- <value value="234" name="A7XX_DBGBUS_USP_0"/>
- <value value="235" name="A7XX_DBGBUS_USP_1"/>
- <value value="236" name="A7XX_DBGBUS_USP_2"/>
- <value value="237" name="A7XX_DBGBUS_USP_3"/>
- <value value="238" name="A7XX_DBGBUS_USP_4"/>
- <value value="239" name="A7XX_DBGBUS_USP_5"/>
- <value value="266" name="A7XX_DBGBUS_TP_0"/>
- <value value="267" name="A7XX_DBGBUS_TP_1"/>
- <value value="268" name="A7XX_DBGBUS_TP_2"/>
- <value value="269" name="A7XX_DBGBUS_TP_3"/>
- <value value="270" name="A7XX_DBGBUS_TP_4"/>
- <value value="271" name="A7XX_DBGBUS_TP_5"/>
- <value value="272" name="A7XX_DBGBUS_TP_6"/>
- <value value="273" name="A7XX_DBGBUS_TP_7"/>
- <value value="274" name="A7XX_DBGBUS_TP_8"/>
- <value value="275" name="A7XX_DBGBUS_TP_9"/>
- <value value="276" name="A7XX_DBGBUS_TP_10"/>
- <value value="277" name="A7XX_DBGBUS_TP_11"/>
- <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
- <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
- <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
- <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
- <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
- <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
- <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
- <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
- <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
- <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
- <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
- <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
- <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
- <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
- <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
- <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
- <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
- <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
- <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
- <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
- <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
- <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
- <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
- <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
- <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
- <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
- <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
- <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
- <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
-</enum>
-
-<enum name="a6xx_cp_perfcounter_select">
- <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
- <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
- <value value="2" name="PERF_CP_BUSY_CYCLES"/>
- <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
- <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
- <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
- <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
- <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
- <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
- <value value="9" name="PERF_CP_MODE_SWITCH"/>
- <value value="10" name="PERF_CP_ZPASS_DONE"/>
- <value value="11" name="PERF_CP_CONTEXT_DONE"/>
- <value value="12" name="PERF_CP_CACHE_FLUSH"/>
- <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
- <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
- <value value="15" name="PERF_CP_SQE_IDLE"/>
- <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
- <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
- <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
- <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
- <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
- <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
- <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
- <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
- <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
- <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
- <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
- <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
- <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
- <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
- <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
- <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
- <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
- <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
- <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
- <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
- <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
- <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
- <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
- <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
- <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
- <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
- <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
- <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
- <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
- <value value="45" name="PERF_CP_PM4_DATA"/>
- <value value="46" name="PERF_CP_PM4_HEADERS"/>
- <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
- <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
- <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
-</enum>
-
-<enum name="a6xx_rbbm_perfcounter_select">
- <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
- <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
- <value value="2" name="PERF_RBBM_TSE_BUSY"/>
- <value value="3" name="PERF_RBBM_RAS_BUSY"/>
- <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
- <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
- <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
- <value value="7" name="PERF_RBBM_COM_BUSY"/>
- <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
- <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
- <value value="10" name="PERF_RBBM_VSC_BUSY"/>
- <value value="11" name="PERF_RBBM_TESS_BUSY"/>
- <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
- <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
-</enum>
-
-<enum name="a6xx_pc_perfcounter_select">
- <value value="0" name="PERF_PC_BUSY_CYCLES"/>
- <value value="1" name="PERF_PC_WORKING_CYCLES"/>
- <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
- <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
- <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
- <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
- <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
- <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
- <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
- <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
- <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
- <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
- <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
- <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
- <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
- <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
- <value value="16" name="PERF_PC_INSTANCES"/>
- <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
- <value value="18" name="PERF_PC_DEAD_PRIM"/>
- <value value="19" name="PERF_PC_LIVE_PRIM"/>
- <value value="20" name="PERF_PC_VERTEX_HITS"/>
- <value value="21" name="PERF_PC_IA_VERTICES"/>
- <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
- <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
- <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
- <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
- <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
- <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
- <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
- <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
- <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
- <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
- <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
- <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
- <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
- <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
- <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
- <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
- <value value="38" name="PERF_PC_TSE_VERTEX"/>
- <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
- <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
- <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
-</enum>
-
-<enum name="a6xx_vfd_perfcounter_select">
- <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
- <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
- <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
- <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
- <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
- <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
- <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
- <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
- <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
- <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
- <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
- <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
- <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
- <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
- <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
- <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
- <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
- <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
- <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
- <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
- <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
- <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
-</enum>
-
-<enum name="a6xx_hlsq_perfcounter_select">
- <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
- <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
- <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
- <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
- <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
- <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
- <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
- <value value="8" name="PERF_HLSQ_QUADS"/>
- <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
- <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
- <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
- <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
- <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
- <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
- <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
- <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
- <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
- <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
- <value value="19" name="PERF_HLSQ_PIXELS"/>
- <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
-</enum>
-
-<enum name="a6xx_vpc_perfcounter_select">
- <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
- <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
- <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
- <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
- <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
- <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
- <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
- <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
- <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
- <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
- <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
- <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
- <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
- <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
- <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
- <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
- <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
- <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
- <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
- <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
- <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
- <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
- <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
- <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
- <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
- <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
- <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
- <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
-</enum>
-
-<enum name="a6xx_tse_perfcounter_select">
- <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
- <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
- <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
- <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
- <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
- <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
- <value value="6" name="PERF_TSE_INPUT_PRIM"/>
- <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
- <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
- <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
- <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
- <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
- <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
- <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
- <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
- <value value="15" name="PERF_TSE_CINVOCATION"/>
- <value value="16" name="PERF_TSE_CPRIMITIVES"/>
- <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
- <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
- <value value="19" name="PERF_TSE_CLIP_PLANES"/>
-</enum>
-
-<enum name="a6xx_ras_perfcounter_select">
- <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
- <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
- <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
- <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
- <value value="4" name="PERF_RAS_SUPER_TILES"/>
- <value value="5" name="PERF_RAS_8X4_TILES"/>
- <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
- <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
- <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
- <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
- <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
- <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
- <value value="12" name="PERF_RAS_BLOCKS"/>
-</enum>
-
-<enum name="a6xx_uche_perfcounter_select">
- <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
- <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
- <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
- <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
- <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
- <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
- <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
- <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
- <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
- <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
- <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
- <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
- <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
- <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
- <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
- <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
- <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
- <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
- <value value="18" name="PERF_UCHE_EVICTS"/>
- <value value="19" name="PERF_UCHE_BANK_REQ0"/>
- <value value="20" name="PERF_UCHE_BANK_REQ1"/>
- <value value="21" name="PERF_UCHE_BANK_REQ2"/>
- <value value="22" name="PERF_UCHE_BANK_REQ3"/>
- <value value="23" name="PERF_UCHE_BANK_REQ4"/>
- <value value="24" name="PERF_UCHE_BANK_REQ5"/>
- <value value="25" name="PERF_UCHE_BANK_REQ6"/>
- <value value="26" name="PERF_UCHE_BANK_REQ7"/>
- <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
- <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
- <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
- <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
- <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
- <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
- <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
- <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
- <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
- <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
- <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
- <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
- <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
-</enum>
-
-<enum name="a6xx_tp_perfcounter_select">
- <value value="0" name="PERF_TP_BUSY_CYCLES"/>
- <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
- <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
- <value value="3" name="PERF_TP_LATENCY_TRANS"/>
- <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
- <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
- <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
- <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
- <value value="8" name="PERF_TP_SP_TP_TRANS"/>
- <value value="9" name="PERF_TP_TP_SP_TRANS"/>
- <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
- <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
- <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
- <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
- <value value="14" name="PERF_TP_QUADS_OFFSET"/>
- <value value="15" name="PERF_TP_QUADS_SHADOW"/>
- <value value="16" name="PERF_TP_QUADS_ARRAY"/>
- <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
- <value value="18" name="PERF_TP_QUADS_1D"/>
- <value value="19" name="PERF_TP_QUADS_2D"/>
- <value value="20" name="PERF_TP_QUADS_BUFFER"/>
- <value value="21" name="PERF_TP_QUADS_3D"/>
- <value value="22" name="PERF_TP_QUADS_CUBE"/>
- <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
- <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
- <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
- <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
- <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
- <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
- <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
- <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
- <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
- <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
- <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
- <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
- <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
- <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
- <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
- <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
- <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
- <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
- <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
- <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
- <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
- <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
- <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
- <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
- <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
- <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
- <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
- <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
- <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
- <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
- <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
- <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
- <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
- <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
-</enum>
-
-<enum name="a6xx_sp_perfcounter_select">
- <value value="0" name="PERF_SP_BUSY_CYCLES"/>
- <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
- <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
- <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
- <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
- <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
- <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
- <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
- <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
- <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
- <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
- <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
- <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
- <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
- <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
- <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
- <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
- <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
- <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
- <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
- <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
- <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
- <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
- <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
- <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
- <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
- <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
- <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
- <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
- <value value="29" name="PERF_SP_LM_ATOMICS"/>
- <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
- <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
- <value value="32" name="PERF_SP_GM_ATOMICS"/>
- <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
- <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
- <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
- <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
- <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
- <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
- <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
- <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
- <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
- <value value="50" name="PERF_SP_PIXELS_KILLED"/>
- <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
- <value value="52" name="PERF_SP_ICL1_MISSES"/>
- <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
- <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
- <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
- <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
- <value value="57" name="PERF_SP_GPR_READ"/>
- <value value="58" name="PERF_SP_GPR_WRITE"/>
- <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
- <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
- <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
- <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
- <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
- <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
- <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
- <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
- <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
- <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
- <value value="71" name="PERF_SP_WORKING_EU"/>
- <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
- <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
- <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
- <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
- <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
- <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
- <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
- <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
- <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
- <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
- <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
- <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
- <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
-</enum>
-
-<enum name="a6xx_rb_perfcounter_select">
- <value value="0" name="PERF_RB_BUSY_CYCLES"/>
- <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
- <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
- <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
- <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
- <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
- <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
- <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
- <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
- <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
- <value value="10" name="PERF_RB_Z_WORKLOAD"/>
- <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
- <value value="12" name="PERF_RB_Z_READ"/>
- <value value="13" name="PERF_RB_Z_WRITE"/>
- <value value="14" name="PERF_RB_C_READ"/>
- <value value="15" name="PERF_RB_C_WRITE"/>
- <value value="16" name="PERF_RB_TOTAL_PASS"/>
- <value value="17" name="PERF_RB_Z_PASS"/>
- <value value="18" name="PERF_RB_Z_FAIL"/>
- <value value="19" name="PERF_RB_S_FAIL"/>
- <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
- <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
- <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
- <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
- <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
- <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
- <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
- <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
- <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
- <value value="29" name="PERF_RB_3D_PIXELS"/>
- <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
- <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
- <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
- <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
- <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
- <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
- <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
- <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
- <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
- <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
- <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
- <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
- <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
- <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
- <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
- <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
- <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
- <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
-</enum>
-
-<enum name="a6xx_vsc_perfcounter_select">
- <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
- <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
- <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
- <value value="3" name="PERF_VSC_EOT_NUM"/>
- <value value="4" name="PERF_VSC_INPUT_TILES"/>
-</enum>
-
-<enum name="a6xx_ccu_perfcounter_select">
- <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
- <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
- <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
- <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
- <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
- <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
- <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
- <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
- <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
- <value value="9" name="PERF_CCU_GMEM_READ"/>
- <value value="10" name="PERF_CCU_GMEM_WRITE"/>
- <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
- <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
- <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
- <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
- <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
- <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
- <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
- <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
- <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
- <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
- <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
- <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
- <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
- <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
- <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
- <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
- <value value="27" name="PERF_CCU_2D_RD_REQ"/>
- <value value="28" name="PERF_CCU_2D_WR_REQ"/>
-</enum>
-
-<enum name="a6xx_lrz_perfcounter_select">
- <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
- <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
- <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
- <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
- <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
- <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
- <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
- <value value="7" name="PERF_LRZ_LRZ_READ"/>
- <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
- <value value="9" name="PERF_LRZ_READ_LATENCY"/>
- <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
- <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
- <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
- <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
- <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
- <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
- <value value="16" name="PERF_LRZ_TILE_KILLED"/>
- <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
- <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
- <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
- <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
- <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
- <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
- <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
- <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
- <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
- <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
- <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
-</enum>
-
-<enum name="a6xx_cmp_perfcounter_select">
- <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
- <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
- <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
- <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
- <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
- <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
- <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
- <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
- <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
- <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
- <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
- <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
- <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
- <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
- <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
- <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
- <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
- <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
- <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
- <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
- <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
- <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
- <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
- <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
- <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
- <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
- <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
- <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
- <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
- <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
- <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
- <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
- <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
- <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
- <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
- <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
- <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
- <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
- <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
- <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
-</enum>
-
-<!--
-Used in a6xx_2d_blit_cntl. The value mostly seems to correlate with the
-component type/size, so it likely relates to the internal format used
-for blending. The one exception is that 16b unorm and 32b float use the
-same value... maybe 16b unorm is uncommon enough that it was just easier
-to upconvert to 32b float internally?
-
- 8b unorm: 10 (sometimes 0, is the high bit part of something else?)
-16b unorm: 4
-
-32b int: 7
-16b int: 6
- 8b int: 5
-
-32b float: 4
-16b float: 3
- -->
-<enum name="a6xx_2d_ifmt">
- <value value="0x10" name="R2D_UNORM8"/>
- <value value="0x7" name="R2D_INT32"/>
- <value value="0x6" name="R2D_INT16"/>
- <value value="0x5" name="R2D_INT8"/>
- <value value="0x4" name="R2D_FLOAT32"/>
- <value value="0x3" name="R2D_FLOAT16"/>
- <value value="0x1" name="R2D_UNORM8_SRGB"/>
- <value value="0x0" name="R2D_RAW"/>
-</enum>
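-
-<!--
-A hedged sketch of the mapping the comment above describes: choose the
-blit internal format from the component type/size, with 16b unorm
-sharing R2D_FLOAT32 with 32b float per the upconvert guess above. The
-"is_*"/"bits" inputs are hypothetical, not fields of any real format
-description.
-
-	#include <stdbool.h>
-
-	/* values mirror the a6xx_2d_ifmt enum above */
-	enum a6xx_2d_ifmt { R2D_RAW = 0x0, R2D_UNORM8_SRGB = 0x1,
-			    R2D_FLOAT16 = 0x3, R2D_FLOAT32 = 0x4,
-			    R2D_INT8 = 0x5, R2D_INT16 = 0x6,
-			    R2D_INT32 = 0x7, R2D_UNORM8 = 0x10 };
-
-	static enum a6xx_2d_ifmt
-	ifmt_for_format(bool is_srgb, bool is_int, bool is_unorm,
-			unsigned bits)
-	{
-		if (is_srgb)
-			return R2D_UNORM8_SRGB;
-		if (is_int)
-			return bits == 32 ? R2D_INT32 :
-			       bits == 16 ? R2D_INT16 : R2D_INT8;
-		if (is_unorm)
-			return bits == 8 ? R2D_UNORM8    /* 0x10 */
-					 : R2D_FLOAT32;  /* 16b unorm: 4 */
-		return bits == 16 ? R2D_FLOAT16 : R2D_FLOAT32;
-	}
- -->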
-
-<enum name="a6xx_ztest_mode">
- <doc>Allow early z-test and early-lrz (if applicable)</doc>
- <value value="0x0" name="A6XX_EARLY_Z"/>
- <doc>Disable early z-test and early-lrz test (if applicable)</doc>
- <value value="0x1" name="A6XX_LATE_Z"/>
- <doc>
- A special mode that allows the early-lrz test but disables
- the early-z test. That might sound a bit funny, since the
- lrz-test happens before the z-test, but as long as a couple
- of conditions are maintained this allows using the lrz-test
- in cases where the fragment shader has kill/discard:
-
- 1) Disable lrz-write in cases where it is uncertain during
- the binning pass that a fragment will pass. I.e. if the frag
- shader has kill, writes z, or alpha/stencil test is
- enabled. (For correctness, lrz-write must also be disabled
- when blend is enabled.) This is analogous to how a
- z-prepass works.
-
- 2) Disable lrz-write and lrz-test if a depth-test direction
- reversal is detected. Due to condition (1), the contents
- of the lrz buffer are a conservative estimate of the
- depth buffer during the draw pass, meaning that geometry
- we know for certain will not be visible will fail the
- lrz-test, while geometry that may be visible (or
- contributes to blend) will pass it.
-
- This allows us to keep the early-lrz test in cases where the
- frag shader does not write z (i.e. we know the z-value before
- the FS runs) and has no side effects (image/ssbo writes, etc),
- but does have kill/discard. That turns out to be a common
- enough case that it is useful to keep the early-lrz test
- against the conservative lrz buffer to discard fragments that
- will definitely not be visible.
- </doc>
- <value value="0x2" name="A6XX_EARLY_LRZ_LATE_Z"/>
- <doc>Not a real hw value, used internally by mesa</doc>
- <value value="0x3" name="A6XX_INVALID_ZTEST"/>
-</enum>
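-
-<!--
-A minimal sketch of the mode selection the doc text above describes; the
-fs_* flags are hypothetical shader-info bits, not a real driver struct.
-Side effects or z-writes force LATE_Z outright, while kill/discard alone
-can keep the early-lrz test against the conservative lrz buffer.
-
-	#include <stdbool.h>
-
-	enum a6xx_ztest_mode { A6XX_EARLY_Z = 0x0, A6XX_LATE_Z = 0x1,
-			       A6XX_EARLY_LRZ_LATE_Z = 0x2 };
-
-	static enum a6xx_ztest_mode
-	choose_ztest_mode(bool fs_writes_z, bool fs_has_kill,
-			  bool fs_has_side_effects)
-	{
-		if (fs_writes_z || fs_has_side_effects)
-			return A6XX_LATE_Z;
-		if (fs_has_kill)
-			return A6XX_EARLY_LRZ_LATE_Z;
-		return A6XX_EARLY_Z;
-	}
- -->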
-
-<enum name="a6xx_tess_spacing">
- <value value="0x0" name="TESS_EQUAL"/>
- <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
- <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
-</enum>
-<enum name="a6xx_tess_output">
- <value value="0x0" name="TESS_POINTS"/>
- <value value="0x1" name="TESS_LINES"/>
- <value value="0x2" name="TESS_CW_TRIS"/>
- <value value="0x3" name="TESS_CCW_TRIS"/>
-</enum>
-
-<enum name="a7xx_cp_perfcounter_select">
- <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
- <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
- <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
- <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
- <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
- <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
- <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
- <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
- <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
- <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
- <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
- <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
- <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
- <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
- <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
- <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
- <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
- <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
- <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
- <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
- <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
- <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
- <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
- <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
- <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
- <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
- <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
- <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
- <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
- <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
- <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
- <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
- <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
- <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
- <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
- <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
- <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
- <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
- <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
- <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
- <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
- <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
- <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
- <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
- <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
- <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
- <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
- <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
- <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
- <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
- <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
- <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
- <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
- <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
- <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
- <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
- <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
- <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
- <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
- <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
- <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
- <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
- <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
- <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
- <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
- <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
- <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
- <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
- <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
- <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
- <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
- <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
- <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
- <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
- <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
- <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
- <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
- <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
- <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
- <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
- <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
-</enum>
-
-<enum name="a7xx_rbbm_perfcounter_select">
- <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
- <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
- <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
- <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
- <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
- <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
- <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
- <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
- <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
- <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
- <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
- <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
- <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
- <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
-</enum>
-
-<enum name="a7xx_pc_perfcounter_select">
- <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
- <value value="3" name="A7XX_PERF_PC_RESERVED"/>
- <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
- <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
- <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
- <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
- <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
- <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
- <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
- <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
- <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
- <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
- <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
- <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
- <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
- <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
- <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
- <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
- <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
- <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
- <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
- <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
- <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
- <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
- <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
- <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
- <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
- <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
- <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
- <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
- <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
- <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
- <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
- <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
- <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
- <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
- <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
- <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
- <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
- <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
- <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
- <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
- <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
- <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
- <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
- <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
- <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
- <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
- <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
-</enum>
-
-<enum name="a7xx_vfd_perfcounter_select">
- <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
- <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
- <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
- <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
- <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
- <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
- <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
- <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
- <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
- <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
- <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
- <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
- <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
- <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
- <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
- <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
- <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
- <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
- <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
- <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
- <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
- <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
-</enum>
-
-<enum name="a7xx_hlsq_perfcounter_select">
- <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
- <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
- <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
- <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
- <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
- <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
- <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
- <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
- <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
- <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
- <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
- <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
- <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
- <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
- <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
- <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
- <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
- <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
- <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
- <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
- <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
- <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
- <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
- <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
- <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
- <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
- <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
- <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
- <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
- <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
- <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
- <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
- <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
- <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
- <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
- <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
- <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
- <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
- <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
- <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
- <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
- <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
- <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
- <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
- <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
- <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
- <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
- <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
- <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
- <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
- <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
- <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
- <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
- <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
- <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
- <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
-</enum>
-
-<enum name="a7xx_vpc_perfcounter_select">
- <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
- <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
- <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
- <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
- <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
- <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
- <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
- <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
- <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
- <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
- <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
- <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
- <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
- <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
- <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
- <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
- <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
- <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
- <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
- <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
- <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
- <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
- <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
- <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
- <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
- <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
- <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
- <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
- <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
- <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
- <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
- <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
- <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
- <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
- <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
- <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
- <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
- <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
- <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
- <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
- <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
- <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
-</enum>
-
-<enum name="a7xx_tse_perfcounter_select">
- <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
- <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
- <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
- <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
- <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
- <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
- <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
- <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
- <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
- <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
- <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
- <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
- <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
- <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
- <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
- <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
- <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
- <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
- <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
-</enum>
-
-<enum name="a7xx_ras_perfcounter_select">
- <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
- <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
- <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
- <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
- <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
- <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
- <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
- <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
- <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
- <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
- <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
- <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
- <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
- <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
- <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
- <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
- <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
- <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
- <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
- <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
- <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
- <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
- <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
- <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
- <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
- <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
- <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
- <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
- <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
-</enum>
-
-<enum name="a7xx_uche_perfcounter_select">
- <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
- <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
- <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
- <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
- <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
- <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
- <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
- <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
- <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
- <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
- <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
- <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
- <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
- <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
- <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
- <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
- <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
- <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
- <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
- <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
- <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
- <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
- <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
- <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
- <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
- <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
- <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
- <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
- <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
- <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
- <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
- <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
- <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
- <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
- <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
- <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
- <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
- <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
- <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
- <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
- <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
- <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
- <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
- <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
- <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
- <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
- <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
- <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
- <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
- <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
- <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
- <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
- <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
- <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
- <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
- <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
- <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
- <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
-</enum>
-
-<enum name="a7xx_tp_perfcounter_select">
- <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
- <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
- <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
- <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
- <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
- <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
- <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
- <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
- <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
- <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
- <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
- <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
- <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
- <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
- <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
- <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
- <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
- <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
- <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
- <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
- <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
- <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
- <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
- <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
- <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
- <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
- <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
- <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
- <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
- <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
- <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
- <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
- <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
- <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
- <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
- <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
- <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
- <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
- <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
- <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
- <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
- <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
- <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
- <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
- <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
- <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
- <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
- <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
- <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
- <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
- <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
- <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
- <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
- <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
- <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
- <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
- <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
- <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
- <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
- <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
- <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
- <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
- <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
- <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
- <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
- <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
- <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
- <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
- <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
- <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
- <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
- <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
- <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
- <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
- <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
- <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
- <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
- <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
- <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
- <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
- <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
- <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
- <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
- <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
- <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
- <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
- <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
- <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
- <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
- <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
- <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
- <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
- <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
- <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
- <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
- <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
- <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
- <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
- <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
- <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
- <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
- <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
- <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
- <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
- <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
- <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
- <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
- <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
- <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
- <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
- <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
- <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
- <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
- <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
- <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
- <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
- <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
- <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
- <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
- <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
- <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
- <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
- <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
- <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
- <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
- <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
- <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
- <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
- <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
- <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
- <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
- <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
-</enum>
-
-<enum name="a7xx_sp_perfcounter_select">
- <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
- <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
- <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
- <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
- <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
- <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
- <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
- <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
- <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
- <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
- <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
- <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
- <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
- <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
- <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
- <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
- <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
- <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
- <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
- <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
- <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
- <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
- <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
- <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
- <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
- <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
- <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
- <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
- <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
- <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
- <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
- <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
- <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
- <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
- <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
- <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
- <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
- <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
- <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
- <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
- <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
- <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
- <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
- <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
- <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
- <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
- <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
- <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
- <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
- <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
- <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
- <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
- <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
- <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
- <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
- <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
- <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
- <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
- <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
- <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
- <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
- <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
- <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
- <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
- <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
- <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
- <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
- <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
- <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
- <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
- <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
- <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
- <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
- <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
- <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
- <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
- <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
- <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
- <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
- <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
- <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
- <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
- <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
- <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
- <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
- <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
- <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
- <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
- <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
- <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
- <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
- <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
- <value value="99" name="A7XX_PERF_SP_QUADS"/>
- <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
- <value value="101" name="A7XX_PERF_SP_PIXELS"/>
- <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
- <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
- <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
- <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
- <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
- <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
- <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
- <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
- <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
- <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
- <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
- <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
- <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
- <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
- <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
- <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
- <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
- <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
- <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
- <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
- <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
- <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
- <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
- <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
- <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
- <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
- <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
- <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
- <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
- <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
- <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
- <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
- <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
- <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
- <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
- <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
- <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
- <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
- <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
- <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
- <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
- <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
- <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
- <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
- <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
- <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
- <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
- <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
- <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
-</enum>
-
-<enum name="a7xx_rb_perfcounter_select">
- <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
- <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
- <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
- <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
- <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
- <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
- <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
- <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
- <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
- <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
- <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
- <value value="12" name="A7XX_PERF_RB_Z_READ"/>
- <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
- <value value="14" name="A7XX_PERF_RB_C_READ"/>
- <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
- <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
- <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
- <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
- <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
- <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
- <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
- <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
- <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
- <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
- <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
- <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
- <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
- <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
- <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
- <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
- <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
- <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
- <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
- <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
- <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
- <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
- <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
- <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
- <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
- <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
- <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
- <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
- <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
- <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
- <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
- <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
- <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
- <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
- <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
- <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
- <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
- <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
- <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
-</enum>
-
-<enum name="a7xx_vsc_perfcounter_select">
- <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
- <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
- <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
- <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
-</enum>
-
-<enum name="a7xx_ccu_perfcounter_select">
- <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
- <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
- <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
- <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
- <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
- <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
- <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
- <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
- <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
- <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
- <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
- <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
- <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
- <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
- <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
- <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
- <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
- <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
- <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
- <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
- <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
- <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
-</enum>
-
-<enum name="a7xx_lrz_perfcounter_select">
- <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
- <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
- <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
- <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
- <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
- <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
- <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
- <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
- <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
- <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
- <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
- <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
- <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
- <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
- <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
- <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
- <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
- <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
- <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
- <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
- <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
- <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
- <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
- <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
- <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
- <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
- <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
-</enum>
-
-<enum name="a7xx_cmp_perfcounter_select">
- <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
- <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
- <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
- <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
- <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
- <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
- <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
- <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
- <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
- <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
- <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
- <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
- <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
- <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
- <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
- <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
- <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
- <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
- <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
- <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
- <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
- <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
- <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
- <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
- <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
- <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
- <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
- <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
- <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
- <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
- <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
- <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
- <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
- <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
- <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
- <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
- <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
- <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
- <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
- <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
- <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
- <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
- <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
- <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
- <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
- <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
- <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
- <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
- <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
- <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
-</enum>
-
-<enum name="a7xx_gbif_perfcounter_select">
- <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
- <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
- <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
- <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
- <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
- <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
- <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
- <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
- <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
- <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
- <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
- <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
- <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
- <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
- <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
- <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
- <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
- <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
- <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
- <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
- <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
- <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
- <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
- <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
- <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
- <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
- <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
- <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
- <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
- <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
- <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
- <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
- <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
- <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
- <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
- <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
- <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
- <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
- <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
- <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
- <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
- <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
- <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
- <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
- <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
- <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
- <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
- <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
- <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
- <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
- <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
- <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
- <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
- <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
- <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
- <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
- <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
- <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
- <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
- <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
- <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
- <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
- <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
- <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
- <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
- <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
- <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
- <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
- <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
- <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
- <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
- <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
- <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
- <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
- <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
- <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
- <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
- <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
- <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
- <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
- <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
-</enum>
-
-<enum name="a7xx_ufc_perfcounter_select">
- <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
- <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
- <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
- <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
- <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
- <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
- <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
- <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
- <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
- <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
- <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
- <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
- <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
- <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
- <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
- <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
- <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
- <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
- <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
- <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
- <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
- <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
- <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
- <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
- <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
- <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
- <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
- <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
- <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
- <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
- <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
- <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
- <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
- <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
- <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
- <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
- <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
- <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
- <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
- <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
- <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
- <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
- <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
- <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
- <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
- <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
- <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
- <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
- <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
- <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
- <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
- <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
- <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
- <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
- <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
- <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
- <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
- <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
-</enum>
-
<domain name="A6XX" width="32" prefix="variant" varset="chip">
<bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
<bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
@@ -2371,7 +177,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX-"/>
<array offset="0x08D0" name="CP_PERFCTR_CP_SEL" stride="1" length="14"/>
<array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX-"/>
- <reg64 offset="0x0900" name="CP_CRASH_SCRIPT_BASE"/>
+ <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE"/>
<reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL"/>
<reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS"/>
<reg32 offset="0x0908" name="CP_SQE_STAT_ADDR"/>
@@ -2400,22 +206,22 @@ to upconvert to 32b float internally?
-->
<reg64 offset="0x0934" name="CP_VSD_BASE"/>
- <bitset name="a6xx_roq_stat" inline="yes">
+ <bitset name="a6xx_roq_status" inline="yes">
<bitfield name="RPTR" low="0" high="9"/>
<bitfield name="WPTR" low="16" high="25"/>
</bitset>
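	<!--
	   A small illustrative decode of a ROQ status word (a sketch only,
	   assuming the read/write pointers wrap at the 10-bit field width):

	     #include <stdint.h>

	     static uint32_t roq_fill_level(uint32_t status)
	     {
	         uint32_t rptr = status & 0x3ff;          /* RPTR, bits 0..9   */
	         uint32_t wptr = (status >> 16) & 0x3ff;  /* WPTR, bits 16..25 */
	         return (wptr - rptr) & 0x3ff;            /* entries in flight */
	     }
	-->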
- <reg32 offset="0x0939" name="CP_ROQ_RB_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093a" name="CP_ROQ_IB1_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093b" name="CP_ROQ_IB2_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093c" name="CP_ROQ_SDS_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093d" name="CP_ROQ_MRB_STAT" type="a6xx_roq_stat"/>
- <reg32 offset="0x093e" name="CP_ROQ_VSD_STAT" type="a6xx_roq_stat"/>
-
- <reg32 offset="0x0943" name="CP_IB1_DWORDS"/>
- <reg32 offset="0x0944" name="CP_IB2_DWORDS"/>
- <reg32 offset="0x0945" name="CP_SDS_DWORDS"/>
- <reg32 offset="0x0946" name="CP_MRB_DWORDS"/>
- <reg32 offset="0x0947" name="CP_VSD_DWORDS"/>
+ <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093a" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status"/>
+ <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status"/>
+
+ <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE"/>
+ <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE"/>
+ <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE"/>
+ <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE"/>
+ <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE"/>
<reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB">
		<doc>number of remaining dwords, including the current dword being consumed?</doc>
@@ -2451,6 +257,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x098D" name="CP_AHB_CNTL"/>
<reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/>
<reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX-"/>
+ <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/>
<reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/>
<reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX-"/>
@@ -2468,8 +275,8 @@ to upconvert to 32b float internally?
<reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX-"/>
<reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9a" name="CP_RESOURCE_TBL_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9b" name="CP_RESOURCE_TBL_DBG_DATA" variants="A7XX-"/>
+ <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX-"/>
+ <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX-"/>
<reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX-"/>
<reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX-"/>
@@ -2619,28 +426,17 @@ to upconvert to 32b float internally?
	vertices in, number of primitives assembled, etc.
-->
- <reg32 offset="0x0540" name="RBBM_PRIMCTR_0_LO"/> <!-- vs vertices in -->
- <reg32 offset="0x0541" name="RBBM_PRIMCTR_0_HI"/>
- <reg32 offset="0x0542" name="RBBM_PRIMCTR_1_LO"/> <!-- vs primitives out -->
- <reg32 offset="0x0543" name="RBBM_PRIMCTR_1_HI"/>
- <reg32 offset="0x0544" name="RBBM_PRIMCTR_2_LO"/> <!-- hs vertices in -->
- <reg32 offset="0x0545" name="RBBM_PRIMCTR_2_HI"/>
- <reg32 offset="0x0546" name="RBBM_PRIMCTR_3_LO"/> <!-- hs patches out -->
- <reg32 offset="0x0547" name="RBBM_PRIMCTR_3_HI"/>
- <reg32 offset="0x0548" name="RBBM_PRIMCTR_4_LO"/> <!-- dss vertices in -->
- <reg32 offset="0x0549" name="RBBM_PRIMCTR_4_HI"/>
- <reg32 offset="0x054a" name="RBBM_PRIMCTR_5_LO"/> <!-- ds primitives out -->
- <reg32 offset="0x054b" name="RBBM_PRIMCTR_5_HI"/>
- <reg32 offset="0x054c" name="RBBM_PRIMCTR_6_LO"/> <!-- gs primitives in -->
- <reg32 offset="0x054d" name="RBBM_PRIMCTR_6_HI"/>
- <reg32 offset="0x054e" name="RBBM_PRIMCTR_7_LO"/> <!-- gs primitives out -->
- <reg32 offset="0x054f" name="RBBM_PRIMCTR_7_HI"/>
- <reg32 offset="0x0550" name="RBBM_PRIMCTR_8_LO"/> <!-- gs primitives out -->
- <reg32 offset="0x0551" name="RBBM_PRIMCTR_8_HI"/>
- <reg32 offset="0x0552" name="RBBM_PRIMCTR_9_LO"/> <!-- raster primitives in -->
- <reg32 offset="0x0553" name="RBBM_PRIMCTR_9_HI"/>
- <reg32 offset="0x0554" name="RBBM_PRIMCTR_10_LO"/>
- <reg32 offset="0x0555" name="RBBM_PRIMCTR_10_HI"/>
+ <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES"/>
+ <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES"/>
+ <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS"/>
+ <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS"/>
+ <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS"/>
+ <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS"/>
+ <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES"/>
+ <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS"/>
+ <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES"/>
+ <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS"/>
+ <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS"/>
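	<!--
	   These are 64-bit pipeline statistics counters; a sketch of a common
	   hi/lo/hi pattern for sampling one of them without tearing
	   (gpu_read() taking a dword offset is a hypothetical helper):

	     #include <stdint.h>

	     uint32_t gpu_read(uint32_t dword_offset);

	     static uint64_t read_pipestat(uint32_t lo)
	     {
	         uint32_t hi1, lo32, hi2;
	         do {
	             hi1  = gpu_read(lo + 1);
	             lo32 = gpu_read(lo);
	             hi2  = gpu_read(lo + 1);
	         } while (hi1 != hi2);   /* retry if the high word carried */
	         return ((uint64_t)hi1 << 32) | lo32;
	     }
	-->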
<reg32 offset="0xF400" name="RBBM_SECVID_TRUST_CNTL"/>
<reg64 offset="0xF800" name="RBBM_SECVID_TSB_TRUSTED_BASE"/>
@@ -2779,7 +575,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX-"/>
<reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE"/>
<reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE"/>
- <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE"/>
+ <reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE" variants="A6XX"/>
<reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX-">
<bitfield name="TXDONE" pos="0" type="boolean"/>
</reg32>
@@ -2840,7 +636,7 @@ to upconvert to 32b float internally?
</reg32>
<reg32 offset="0x062f" name="DBGC_CFG_DBGBUS_TRACE_BUF1"/>
<reg32 offset="0x0630" name="DBGC_CFG_DBGBUS_TRACE_BUF2"/>
- <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2"/>
+ <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2" variants="A6XX"/>
<reg32 offset="0x0CD8" name="VSC_UNKNOWN_0CD8" variants="A7XX">
<doc>
			Set to true when binning; it isn't changed afterwards
@@ -2936,8 +732,8 @@ to upconvert to 32b float internally?
<bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
<bitfield name="HEIGHT" low="8" high="16" shr="4" type="uint"/>
</reg32>
- <reg64 offset="0x0c03" name="VSC_DRAW_STRM_SIZE_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c06" name="VSC_BIN_COUNT" usage="rp_blit">
+ <reg64 offset="0x0c03" name="VSC_SIZE_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c06" name="VSC_EXPANDED_BIN_CNTL" usage="rp_blit">
<bitfield name="NX" low="1" high="10" type="uint"/>
<bitfield name="NY" low="11" high="20" type="uint"/>
</reg32>
@@ -2967,14 +763,14 @@ to upconvert to 32b float internally?
		LENGTH is set to STRIDE - 64, to make room for a bit of overflow
-->
- <reg64 offset="0x0c30" name="VSC_PRIM_STRM_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c32" name="VSC_PRIM_STRM_PITCH" usage="cmd"/>
- <reg32 offset="0x0c33" name="VSC_PRIM_STRM_LIMIT" usage="cmd"/>
- <reg64 offset="0x0c34" name="VSC_DRAW_STRM_ADDRESS" type="waddress" usage="cmd"/>
- <reg32 offset="0x0c36" name="VSC_DRAW_STRM_PITCH" usage="cmd"/>
- <reg32 offset="0x0c37" name="VSC_DRAW_STRM_LIMIT" usage="cmd"/>
-
- <array offset="0x0c38" name="VSC_STATE" stride="1" length="32" usage="rp_blit">
+ <reg64 offset="0x0c30" name="VSC_PIPE_DATA_PRIM_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c32" name="VSC_PIPE_DATA_PRIM_STRIDE" usage="cmd"/>
+ <reg32 offset="0x0c33" name="VSC_PIPE_DATA_PRIM_LENGTH" usage="cmd"/>
+ <reg64 offset="0x0c34" name="VSC_PIPE_DATA_DRAW_BASE" type="waddress" usage="cmd"/>
+ <reg32 offset="0x0c36" name="VSC_PIPE_DATA_DRAW_STRIDE" usage="cmd"/>
+ <reg32 offset="0x0c37" name="VSC_PIPE_DATA_DRAW_LENGTH" usage="cmd"/>
+
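	<!--
	   For illustration, a sketch of programming one stream per the
	   LENGTH = STRIDE - 64 convention noted above (the emit helpers and
	   register constants are hypothetical, not a real driver API; the
	   offsets are taken from the definitions above):

	     #include <stdint.h>

	     struct ring;
	     void emit_reg32(struct ring *r, uint32_t reg, uint32_t val);
	     void emit_reg64(struct ring *r, uint32_t reg, uint64_t val);

	     enum {
	         REG_VSC_PIPE_DATA_PRIM_BASE   = 0x0c30,
	         REG_VSC_PIPE_DATA_PRIM_STRIDE = 0x0c32,
	         REG_VSC_PIPE_DATA_PRIM_LENGTH = 0x0c33,
	     };

	     static void vsc_prim_stream(struct ring *r, uint64_t iova, uint32_t stride)
	     {
	         emit_reg64(r, REG_VSC_PIPE_DATA_PRIM_BASE, iova);
	         emit_reg32(r, REG_VSC_PIPE_DATA_PRIM_STRIDE, stride);
	         /* leave 64 bytes of slack so small overflows are detectable */
	         emit_reg32(r, REG_VSC_PIPE_DATA_PRIM_LENGTH, stride - 64);
	     }
	-->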
+ <array offset="0x0c38" name="VSC_CHANNEL_VISIBILITY" stride="1" length="32" usage="rp_blit">
<doc>
Seems to be a bitmap of which tiles mapped to the VSC
pipe contain geometry.
@@ -2985,7 +781,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG"/>
</array>
- <array offset="0x0c58" name="VSC_PRIM_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <array offset="0x0c58" name="VSC_PIPE_DATA_PRIM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
<doc>
			Has the size of data written to the corresponding
			VSC_PIPE_DATA_PRIM buffer.
@@ -2993,10 +789,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG"/>
</array>
- <array offset="0x0c78" name="VSC_DRAW_STRM_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
+ <array offset="0x0c78" name="VSC_PIPE_DATA_DRAW_SIZE" stride="1" length="32" variants="A6XX" usage="rp_blit">
<doc>
			Has the size of data written to the corresponding VSC pipe, i.e.
- same thing that is written out to VSC_DRAW_STRM_SIZE_ADDRESS_LO/HI
+ same thing that is written out to VSC_SIZE_BASE
</doc>
<reg32 offset="0x0" name="REG"/>
</array>
@@ -3028,17 +824,17 @@ to upconvert to 32b float internally?
<bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
</reg32>
- <bitset name="a6xx_gras_xs_cl_cntl" inline="yes">
+ <bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes">
<bitfield name="CLIP_MASK" low="0" high="7"/>
<bitfield name="CULL_MASK" low="8" high="15"/>
</bitset>
- <reg32 offset="0x8001" name="GRAS_VS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8002" name="GRAS_DS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8003" name="GRAS_GS_CL_CNTL" type="a6xx_gras_xs_cl_cntl" usage="rp_blit"/>
- <reg32 offset="0x8004" name="GRAS_MAX_LAYER_INDEX" low="0" high="10" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
+ <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit"/>
- <reg32 offset="0x8005" name="GRAS_CNTL" usage="rp_blit">
- <!-- see also RB_RENDER_CONTROL0 -->
+ <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" usage="rp_blit">
+ <!-- see also RB_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
<bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
@@ -3067,7 +863,7 @@ to upconvert to 32b float internally?
<!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
<!-- 0x8006-0x800f invalid -->
- <array offset="0x8010" name="GRAS_CL_VPORT" stride="6" length="16" usage="rp_blit">
+ <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" usage="rp_blit">
<reg32 offset="0" name="XOFFSET" type="float"/>
<reg32 offset="1" name="XSCALE" type="float"/>
<reg32 offset="2" name="YOFFSET" type="float"/>
@@ -3075,7 +871,7 @@ to upconvert to 32b float internally?
<reg32 offset="4" name="ZOFFSET" type="float"/>
<reg32 offset="5" name="ZSCALE" type="float"/>
</array>
- <array offset="0x8070" name="GRAS_CL_Z_CLAMP" stride="2" length="16" usage="rp_blit">
+ <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" usage="rp_blit">
<reg32 offset="0" name="MIN" type="float"/>
<reg32 offset="1" name="MAX" type="float"/>
</array>
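	<!--
	   The scale/offset pairs follow the usual viewport-transform mapping;
	   an illustrative packing for viewport 0, assuming a zero-to-one
	   depth range (sketch only):

	     struct vp_regs { float xoff, xscale, yoff, yscale, zoff, zscale; };

	     static struct vp_regs viewport(float x, float y, float w, float h,
	                                    float zmin, float zmax)
	     {
	         /* register order matches GRAS_CL_VIEWPORT: offset = center,
	            scale = half-extent, z maps [zmin, zmax] */
	         return (struct vp_regs){ x + w / 2, w / 2,
	                                  y + h / 2, h / 2,
	                                  zmin, zmax - zmin };
	     }
	-->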
@@ -3124,7 +920,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
- <bitfield name="SHIFTAMOUNT" low="1" high="2"/>
+ <enum name="a6xx_shift_amount">
+ <value value="0" name="NO_SHIFT"/>
+ <value value="1" name="HALF_PIXEL_SHIFT"/>
+ <value value="2" name="FULL_PIXEL_SHIFT"/>
+ </enum>
+ <bitfield name="SHIFTAMOUNT" low="1" high="2" type="a6xx_shift_amount"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="5"/>
</reg32>
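	<!--
	   Sketch of a typical packing for overestimating conservative
	   rasterization with a half-pixel expansion, using the bit positions
	   above (the value construction is illustrative):

	     uint32_t cntl = (1u << 0)    /* CONSERVATIVERASEN            */
	                   | (1u << 1);   /* SHIFTAMOUNT = HALF_PIXEL_SHIFT */
	-->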
@@ -3133,13 +934,13 @@ to upconvert to 32b float internally?
<bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
</reg32>
- <bitset name="a6xx_gras_layer_cntl" inline="yes">
+	<bitset name="a6xx_gras_su_xs_siv_cntl" inline="yes">
<bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
<bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
</bitset>
- <reg32 offset="0x809b" name="GRAS_VS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x809c" name="GRAS_GS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x809d" name="GRAS_DS_LAYER_CNTL" type="a6xx_gras_layer_cntl" usage="rp_blit"/>
+	<reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_su_xs_siv_cntl" usage="rp_blit"/>
+	<reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_su_xs_siv_cntl" usage="rp_blit"/>
+	<reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_su_xs_siv_cntl" usage="rp_blit"/>
<!-- 0x809e/0x809f invalid -->
<enum name="a6xx_sequenced_thread_dist">
@@ -3213,13 +1014,13 @@ to upconvert to 32b float internally?
<enum name="a6xx_lrz_feedback_mask">
<value value="0x0" name="LRZ_FEEDBACK_NONE"/>
<value value="0x1" name="LRZ_FEEDBACK_EARLY_Z"/>
- <value value="0x2" name="LRZ_FEEDBACK_EARLY_LRZ_LATE_Z"/>
+ <value value="0x2" name="LRZ_FEEDBACK_EARLY_Z_LATE_Z"/>
<!-- We don't have a flag type and this flags combination is often used -->
- <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_LRZ_LATE_Z"/>
+ <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_Z_LATE_Z"/>
<value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
</enum>
- <reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
+ <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3235,22 +1036,22 @@ to upconvert to 32b float internally?
<bitfield name="UNK27" pos="27"/>
</reg32>
- <reg32 offset="0x80a2" name="GRAS_RAS_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" pos="2"/>
<bitfield name="UNK3" pos="3"/>
</reg32>
- <reg32 offset="0x80a3" name="GRAS_DEST_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
- <bitset name="a6xx_sample_config" inline="yes">
+ <bitset name="a6xx_msaa_sample_pos_cntl" inline="yes">
<bitfield name="UNK0" pos="0"/>
<bitfield name="LOCATION_ENABLE" pos="1" type="boolean"/>
</bitset>
- <bitset name="a6xx_sample_locations" inline="yes">
+ <bitset name="a6xx_programmable_msaa_pos" inline="yes">
<bitfield name="SAMPLE_0_X" low="0" high="3" radix="4" type="fixed"/>
<bitfield name="SAMPLE_0_Y" low="4" high="7" radix="4" type="fixed"/>
<bitfield name="SAMPLE_1_X" low="8" high="11" radix="4" type="fixed"/>
@@ -3261,9 +1062,9 @@ to upconvert to 32b float internally?
<bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
</bitset>
- <reg32 offset="0x80a4" name="GRAS_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0x80a5" name="GRAS_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0x80a6" name="GRAS_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
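	<!--
	   Illustrative packing of the four programmable sample positions,
	   assuming each X/Y nibble is a signed 0.4 fixed-point offset from
	   the pixel center in 1/16ths (per the radix="4" fields above):

	     #include <stdint.h>

	     static uint32_t msaa_pos(const float xy[8])  /* x0,y0 .. x3,y3 */
	     {
	         uint32_t v = 0;
	         for (int i = 0; i < 8; i++)
	             v |= ((uint32_t)(int32_t)(xy[i] * 16.0f) & 0xf) << (4 * i);
	         return v;
	     }
	-->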
<reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
@@ -3286,13 +1087,36 @@ to upconvert to 32b float internally?
<reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
- <!-- 0x80f4 - 0x80fa are used for VK_KHR_fragment_shading_rate -->
- <reg64 offset="0x80f4" name="GRAS_UNKNOWN_80F4" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f5" name="GRAS_UNKNOWN_80F5" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f6" name="GRAS_UNKNOWN_80F6" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f8" name="GRAS_UNKNOWN_80F8" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80f9" name="GRAS_UNKNOWN_80F9" variants="A7XX-" usage="cmd"/>
- <reg64 offset="0x80fa" name="GRAS_UNKNOWN_80FA" variants="A7XX-" usage="cmd"/>
+ <enum name="a6xx_fsr_combiner">
+ <value value="0" name="FSR_COMBINER_OP_KEEP"/>
+ <value value="1" name="FSR_COMBINER_OP_REPLACE"/>
+ <value value="2" name="FSR_COMBINER_OP_MIN"/>
+ <value value="3" name="FSR_COMBINER_OP_MAX"/>
+ <value value="4" name="FSR_COMBINER_OP_MUL"/>
+ </enum>
+
+ <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="FRAG_SIZE_X" low="1" high="2" type="uint"/>
+ <bitfield name="FRAG_SIZE_Y" low="3" high="4" type="uint"/>
+ <bitfield name="COMBINER_OP_1" low="5" high="7" type="a6xx_fsr_combiner"/>
+ <bitfield name="COMBINER_OP_2" low="8" high="10" type="a6xx_fsr_combiner"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="13" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="20" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="LAYERED" pos="0" type="boolean"/>
+ <bitfield name="TILE_MODE" low="1" high="2" type="a6xx_tile_mode"/>
+ </reg32>
+ <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" variants="A7XX-" usage="rp_blit">
+ <bitfield name="WIDTH" low="0" high="15" type="uint"/>
+ <bitfield name="HEIGHT" low="16" high="31" type="uint"/>
+ </reg32>
+ <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX-" type="waddress" usage="rp_blit"/>
+ <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/>
+ <bitfield name="ARRAY_PITCH" shr="6" low="10" high="28" type="uint"/>
+ </reg32>
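	<!--
	   The combiner ops appear to mirror VK_KHR_fragment_shading_rate;
	   presumably COMBINER_OP_1 merges the pipeline and primitive rates
	   and COMBINER_OP_2 merges that result with the attachment rate. A
	   sketch of the op semantics (the hardware mapping is an assumption):

	     static unsigned combine(unsigned a, unsigned b, unsigned op)
	     {
	         switch (op) {
	         case 0:  return a;              /* KEEP    */
	         case 1:  return b;              /* REPLACE */
	         case 2:  return a < b ? a : b;  /* MIN     */
	         case 3:  return a > b ? a : b;  /* MAX     */
	         case 4:  return a * b;          /* MUL     */
	         default: return a;
	         }
	     }
	-->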
<enum name="a6xx_lrz_dir_status">
<value value="0x1" name="LRZ_DIR_LE"/>
@@ -3313,7 +1137,7 @@ to upconvert to 32b float internally?
</doc>
<bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
- <bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="Z_WRITE_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
<bitfield name="DIR" low="6" high="7" type="a6xx_lrz_dir_status"/>
<doc>
@@ -3339,14 +1163,13 @@ to upconvert to 32b float internally?
<bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
</reg32>
- <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUF_INFO_0" usage="rp_blit">
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" usage="rp_blit">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
</reg32>
<reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
<reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
- <!-- TODO: fix the shr fields -->
<bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="10" high="28" shr="4" type="uint"/>
+ <bitfield name="ARRAY_PITCH" low="10" high="28" shr="8" type="uint"/>
</reg32>
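	<!--
	   The pitch fields are stored pre-shifted, which implies 32-byte and
	   256-byte alignment respectively; an illustrative packing:

	     #include <stdint.h>

	     static uint32_t lrz_buffer_pitch(uint32_t pitch, uint32_t array_pitch)
	     {
	         return ((pitch >> 5) & 0xff)                    /* PITCH, bits 0..7        */
	              | (((array_pitch >> 8) & 0x7ffff) << 10);  /* ARRAY_PITCH, bits 10..28 */
	     }
	-->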
<!--
@@ -3381,18 +1204,18 @@ to upconvert to 32b float internally?
-->
<reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
<!-- 0x8108 invalid -->
- <reg32 offset="0x8109" name="GRAS_SAMPLE_CNTL" usage="rp_blit">
+ <reg32 offset="0x8109" name="GRAS_LRZ_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
<!--
LRZ buffer represents a single array layer + mip level, and there is
a single buffer per depth image. Thus to reuse LRZ between renderpasses
	it is necessary to track the depth view used in the previous renderpass, which
- GRAS_LRZ_DEPTH_VIEW is for.
- GRAS_LRZ_CNTL checks if current value of GRAS_LRZ_DEPTH_VIEW is equal to
+ GRAS_LRZ_VIEW_INFO is for.
+	GRAS_LRZ_CNTL checks whether the current value of GRAS_LRZ_VIEW_INFO is equal to
	the value stored in the LRZ buffer; if not, LRZ is disabled.
-->
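	<!--
	   A sketch of packing the view key that GRAS_LRZ_CNTL compares,
	   using the field layout defined just below (illustrative only):

	     #include <stdint.h>

	     static uint32_t lrz_view_info(uint32_t base_layer, uint32_t layers,
	                                   uint32_t base_mip)
	     {
	         return (base_layer & 0x7ff)        /* bits  0..10 */
	              | ((layers & 0x7ff) << 16)    /* bits 16..26 */
	              | ((base_mip & 0xf) << 28);   /* bits 28..31 */
	     }
	-->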
- <reg32 offset="0x810a" name="GRAS_LRZ_DEPTH_VIEW" usage="cmd">
+ <reg32 offset="0x810a" name="GRAS_LRZ_VIEW_INFO" usage="cmd">
<bitfield name="BASE_LAYER" low="0" high="10" type="uint"/>
<bitfield name="LAYER_COUNT" low="16" high="26" type="uint"/>
<bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
@@ -3408,7 +1231,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
- <reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
+ <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX-"/>
<reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
@@ -3430,7 +1253,7 @@ to upconvert to 32b float internally?
<value value="0x5" name="ROTATE_VFLIP"/>
</enum>
- <bitset name="a6xx_2d_blit_cntl" inline="yes">
+ <bitset name="a6xx_a2d_bit_cntl" inline="yes">
<bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/>
<bitfield name="OVERWRITEEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="6"/>
@@ -3447,22 +1270,22 @@ to upconvert to 32b float internally?
<bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x8400" name="GRAS_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
	<!-- note: the low 8 bits for src coords are valid, probably fixed point.
	     It would be a bit weird though, since we subtract 1 from BR coords;
	     apparently signed, since the gallium driver uses negative coords and it works?
-->
- <reg32 offset="0x8401" name="GRAS_2D_SRC_TL_X" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8402" name="GRAS_2D_SRC_BR_X" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8403" name="GRAS_2D_SRC_TL_Y" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8404" name="GRAS_2D_SRC_BR_Y" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8405" name="GRAS_2D_DST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8406" name="GRAS_2D_DST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
<reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
<reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
- <reg32 offset="0x840a" name="GRAS_2D_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x840b" name="GRAS_2D_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- 0x840c-0x85ff invalid -->
<!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
@@ -3481,7 +1304,7 @@ to upconvert to 32b float internally?
-->
<!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
- <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3490,7 +1313,7 @@ to upconvert to 32b float internally?
<bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
- <reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -3501,8 +1324,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
<bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
- <!-- set during binning pass: -->
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
@@ -3515,15 +1337,14 @@ to upconvert to 32b float internally?
</reg32>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="EARLYVIZOUTEN" pos="6" type="boolean"/>
- <!-- set during binning pass: -->
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
<bitfield name="RASTER_MODE" pos="8" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="9" high="10" type="a6xx_raster_direction"/>
<bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
</reg32>
<reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="BINNING" pos="7" type="boolean"/>
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
</reg32>
<reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
@@ -3536,16 +1357,16 @@ to upconvert to 32b float internally?
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x8804" name="RB_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0x8805" name="RB_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0x8806" name="RB_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
+ <reg32 offset="0x8804" name="RB_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8805" name="RB_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x8806" name="RB_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<!-- 0x8807-0x8808 invalid -->
<!--
note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL
name comes from kernel and is probably right)
-->
- <reg32 offset="0x8809" name="RB_RENDER_CONTROL0" usage="rp_blit">
- <!-- see also GRAS_CNTL -->
+ <reg32 offset="0x8809" name="RB_INTERP_CNTL" usage="rp_blit">
+ <!-- see also GRAS_CL_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
<bitfield name="IJ_PERSP_SAMPLE" pos="2" type="boolean"/>
@@ -3555,7 +1376,7 @@ to upconvert to 32b float internally?
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
<bitfield name="UNK10" pos="10" type="boolean"/>
</reg32>
- <reg32 offset="0x880a" name="RB_RENDER_CONTROL1" usage="rp_blit">
+ <reg32 offset="0x880a" name="RB_PS_INPUT_CNTL" usage="rp_blit">
<!-- enable bits for various FS sysvalue regs: -->
<bitfield name="SAMPLEMASK" pos="0" type="boolean"/>
<bitfield name="POSTDEPTHCOVERAGE" pos="1" type="boolean"/>
@@ -3567,16 +1388,16 @@ to upconvert to 32b float internally?
<bitfield name="FOVEATION" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="0x880b" name="RB_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <reg32 offset="0x880b" name="RB_PS_OUTPUT_CNTL" usage="rp_blit">
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
<bitfield name="FRAG_WRITES_Z" pos="1" type="boolean"/>
<bitfield name="FRAG_WRITES_SAMPMASK" pos="2" type="boolean"/>
<bitfield name="FRAG_WRITES_STENCILREF" pos="3" type="boolean"/>
</reg32>
- <reg32 offset="0x880c" name="RB_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <reg32 offset="0x880c" name="RB_PS_MRT_CNTL" usage="rp_blit">
<bitfield name="MRT" low="0" high="3" type="uint"/>
</reg32>
- <reg32 offset="0x880d" name="RB_RENDER_COMPONENTS" usage="rp_blit">
+ <reg32 offset="0x880d" name="RB_PS_OUTPUT_MASK" usage="rp_blit">
<bitfield name="RT0" low="0" high="3"/>
<bitfield name="RT1" low="4" high="7"/>
<bitfield name="RT2" low="8" high="11"/>
@@ -3608,7 +1429,7 @@ to upconvert to 32b float internally?
<bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0x8810" name="RB_SAMPLE_CNTL" usage="rp_blit">
+ <reg32 offset="0x8810" name="RB_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
<reg32 offset="0x8811" name="RB_UNKNOWN_8811" low="4" high="6" usage="cmd"/>
@@ -3672,18 +1493,18 @@ to upconvert to 32b float internally?
<reg32 offset="0x7" name="BASE_GMEM" low="12" high="31" shr="12"/>
</array>
- <reg32 offset="0x8860" name="RB_BLEND_RED_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8861" name="RB_BLEND_GREEN_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8862" name="RB_BLEND_BLUE_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8863" name="RB_BLEND_ALPHA_F32" type="float" usage="rp_blit"/>
- <reg32 offset="0x8864" name="RB_ALPHA_CONTROL" usage="cmd">
+ <reg32 offset="0x8860" name="RB_BLEND_CONSTANT_RED_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8861" name="RB_BLEND_CONSTANT_GREEN_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8862" name="RB_BLEND_CONSTANT_BLUE_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8863" name="RB_BLEND_CONSTANT_ALPHA_FP32" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8864" name="RB_ALPHA_TEST_CNTL" usage="cmd">
<bitfield name="ALPHA_REF" low="0" high="7" type="hex"/>
<bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
<bitfield name="ALPHA_TEST_FUNC" low="9" high="11" type="adreno_compare_func"/>
</reg32>
<reg32 offset="0x8865" name="RB_BLEND_CNTL" usage="rp_blit">
<!-- per-mrt enable bit -->
- <bitfield name="ENABLE_BLEND" low="0" high="7"/>
+ <bitfield name="BLEND_READS_DEST" low="0" high="7"/>
<bitfield name="INDEPENDENT_BLEND" pos="8" type="boolean"/>
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/>
<bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
@@ -3726,12 +1547,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8873" name="RB_DEPTH_BUFFER_PITCH" low="0" high="13" shr="6" type="uint" usage="rp_blit"/>
<reg32 offset="0x8874" name="RB_DEPTH_BUFFER_ARRAY_PITCH" low="0" high="27" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x8875" name="RB_DEPTH_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8877" name="RB_DEPTH_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8877" name="RB_DEPTH_GMEM_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
- <reg32 offset="0x8878" name="RB_Z_BOUNDS_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x8879" name="RB_Z_BOUNDS_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8878" name="RB_DEPTH_BOUND_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x8879" name="RB_DEPTH_BOUND_MAX" type="float" usage="rp_blit"/>
<!-- 0x887a-0x887f invalid -->
- <reg32 offset="0x8880" name="RB_STENCIL_CONTROL" usage="rp_blit">
+ <reg32 offset="0x8880" name="RB_STENCIL_CNTL" usage="rp_blit">
<bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
<bitfield name="STENCIL_ENABLE_BF" pos="1" type="boolean"/>
<!--
@@ -3753,11 +1574,11 @@ to upconvert to 32b float internally?
<reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
<bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A6XX" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
</reg32>
- <reg32 offset="0x8881" name="RB_STENCIL_INFO" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
<bitfield name="TILEMODE" low="2" high="3" type="a6xx_tile_mode"/>
@@ -3765,22 +1586,22 @@ to upconvert to 32b float internally?
<reg32 offset="0x8882" name="RB_STENCIL_BUFFER_PITCH" low="0" high="11" shr="6" type="uint" usage="rp_blit"/>
<reg32 offset="0x8883" name="RB_STENCIL_BUFFER_ARRAY_PITCH" low="0" high="23" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x8884" name="RB_STENCIL_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8886" name="RB_STENCIL_BUFFER_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
- <reg32 offset="0x8887" name="RB_STENCILREF" usage="rp_blit">
+ <reg32 offset="0x8886" name="RB_STENCIL_GMEM_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x8887" name="RB_STENCIL_REF_CNTL" usage="rp_blit">
<bitfield name="REF" low="0" high="7"/>
<bitfield name="BFREF" low="8" high="15"/>
</reg32>
- <reg32 offset="0x8888" name="RB_STENCILMASK" usage="rp_blit">
+ <reg32 offset="0x8888" name="RB_STENCIL_MASK" usage="rp_blit">
<bitfield name="MASK" low="0" high="7"/>
<bitfield name="BFMASK" low="8" high="15"/>
</reg32>
- <reg32 offset="0x8889" name="RB_STENCILWRMASK" usage="rp_blit">
+ <reg32 offset="0x8889" name="RB_STENCIL_WRITE_MASK" usage="rp_blit">
<bitfield name="WRMASK" low="0" high="7"/>
<bitfield name="BFWRMASK" low="8" high="15"/>
</reg32>
<!-- 0x888a-0x888f invalid -->
<reg32 offset="0x8890" name="RB_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8891" name="RB_SAMPLE_COUNT_CONTROL" usage="cmd">
+ <reg32 offset="0x8891" name="RB_SAMPLE_COUNTER_CNTL" usage="cmd">
<bitfield name="DISABLE" pos="0" type="boolean"/>
<bitfield name="COPY" pos="1" type="boolean"/>
</reg32>
@@ -3791,27 +1612,27 @@ to upconvert to 32b float internally?
<reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
<!-- 0x8899-0x88bf invalid -->
<!-- clamps depth value for depth test/write -->
- <reg32 offset="0x88c0" name="RB_Z_CLAMP_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x88c1" name="RB_Z_CLAMP_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit"/>
<!-- 0x88c2-0x88cf invalid -->
- <reg32 offset="0x88d0" name="RB_UNKNOWN_88D0" usage="rp_blit">
+ <reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit">
<bitfield name="UNK0" low="0" high="12"/>
<bitfield name="UNK16" low="16" high="26"/>
</reg32>
- <reg32 offset="0x88d1" name="RB_BLIT_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x88d2" name="RB_BLIT_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d1" name="RB_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d2" name="RB_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- weird to duplicate other regs from same block?? -->
- <reg32 offset="0x88d3" name="RB_BIN_CONTROL2" usage="rp_blit">
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
</reg32>
- <reg32 offset="0x88d4" name="RB_WINDOW_OFFSET2" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x88d5" name="RB_BLIT_GMEM_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0x88d4" name="RB_RESOLVE_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x88d5" name="RB_RESOLVE_GMEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
</reg32>
- <reg32 offset="0x88d6" name="RB_BLIT_BASE_GMEM" low="12" high="31" shr="12" usage="rp_blit"/>
+ <reg32 offset="0x88d6" name="RB_RESOLVE_GMEM_BUFFER_BASE" low="12" high="31" shr="12" usage="rp_blit"/>
<!-- s/DST_FORMAT/DST_INFO/ probably: -->
- <reg32 offset="0x88d7" name="RB_BLIT_DST_INFO" usage="rp_blit">
+ <reg32 offset="0x88d7" name="RB_RESOLVE_SYSTEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
<bitfield name="FLAGS" pos="2" type="boolean"/>
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
@@ -3820,25 +1641,31 @@ to upconvert to 32b float internally?
<bitfield name="UNK15" pos="15" type="boolean"/>
<bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
- <reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88d8" name="RB_RESOLVE_SYSTEM_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88da" name="RB_RESOLVE_SYSTEM_BUFFER_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- array-pitch is size of layer -->
- <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
- <reg64 offset="0x88dc" name="RB_BLIT_FLAG_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88de" name="RB_BLIT_FLAG_DST_PITCH" usage="rp_blit">
+ <reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x88dc" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" usage="rp_blit">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
- <reg32 offset="0x88df" name="RB_BLIT_CLEAR_COLOR_DW0" usage="rp_blit"/>
- <reg32 offset="0x88e0" name="RB_BLIT_CLEAR_COLOR_DW1" usage="rp_blit"/>
- <reg32 offset="0x88e1" name="RB_BLIT_CLEAR_COLOR_DW2" usage="rp_blit"/>
- <reg32 offset="0x88e2" name="RB_BLIT_CLEAR_COLOR_DW3" usage="rp_blit"/>
+ <reg32 offset="0x88df" name="RB_RESOLVE_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x88e0" name="RB_RESOLVE_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x88e1" name="RB_RESOLVE_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x88e2" name="RB_RESOLVE_CLEAR_COLOR_DW3" usage="rp_blit"/>
+
+ <enum name="a6xx_blit_event_type">
+ <value value="0x0" name="BLIT_EVENT_STORE"/>
+ <value value="0x1" name="BLIT_EVENT_STORE_AND_CLEAR"/>
+ <value value="0x2" name="BLIT_EVENT_CLEAR"/>
+ <value value="0x3" name="BLIT_EVENT_LOAD"/>
+ </enum>
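<!--
A speculative sketch of how a GMEM renderpass might pick these events; the
mapping to attachment load/store ops is inferred from the enum names only:

typedef enum { ATT_LOAD, ATT_CLEAR, ATT_STORE, ATT_STORE_AND_CLEAR } att_op;

static inline unsigned blit_event_for(att_op op)
{
    switch (op) {
    case ATT_LOAD:            return 0x3; /* BLIT_EVENT_LOAD: sysmem to GMEM */
    case ATT_CLEAR:           return 0x2; /* BLIT_EVENT_CLEAR: clear in GMEM */
    case ATT_STORE_AND_CLEAR: return 0x1; /* presumably store, then clear */
    default:                  return 0x0; /* BLIT_EVENT_STORE: GMEM to sysmem */
    }
}
-->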
<!-- seems somewhat similar to what we called RB_CLEAR_CNTL on a5xx: -->
- <reg32 offset="0x88e3" name="RB_BLIT_INFO" usage="rp_blit">
- <bitfield name="UNK0" pos="0" type="boolean"/> <!-- s8 stencil restore/clear? But also color restore? -->
- <bitfield name="GMEM" pos="1" type="boolean"/> <!-- set for restore and clear to gmem? -->
+ <reg32 offset="0x88e3" name="RB_RESOLVE_OPERATION" usage="rp_blit">
+ <bitfield name="TYPE" low="0" high="1" type="a6xx_blit_event_type"/>
<bitfield name="SAMPLE_0" pos="2" type="boolean"/> <!-- takes sample 0 instead of averaging -->
<bitfield name="DEPTH" pos="3" type="boolean"/> <!-- z16/z32/z24s8/x24x8 clear or resolve? -->
<doc>
@@ -3853,16 +1680,20 @@ to upconvert to 32b float internally?
<!-- set when this is the last resolve on a650+ -->
<bitfield name="LAST" low="8" high="9"/>
<!--
- a618 GLES: color render target number being resolved for RM6_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
- a618 VK: 0x8 for depth RM6_RESOLVE, 0x9 for separate stencil, 0 otherwise.
-
- We believe this is related to concurrent resolves
+ a618 GLES: color render target number being resolved for CCU_RESOLVE, 0x8 for depth, 0x9 for separate stencil.
+ a618 VK: 0x8 for depth CCU_RESOLVE, 0x9 for separate stencil, 0 otherwise.
+ a7xx VK: 0x8 for depth, 0x9 for separate stencil, 0x0 to 0x7 used for concurrent resolves of color render
+ targets inside a given resolve group.
-->
<bitfield name="BUFFER_ID" low="12" high="15"/>
</reg32>
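<!--
A minimal sketch of BUFFER_ID selection following the comment above
(0x8 = depth, 0x9 = separate stencil, 0x0..0x7 = color MRT index within a
resolve group on a7xx); the helper name is made up:

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t resolve_buffer_id(bool depth, bool separate_stencil,
                                         uint32_t mrt_index)
{
    if (depth)
        return 0x8;
    if (separate_stencil)
        return 0x9;
    return mrt_index & 0x7;
}
-->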
- <reg32 offset="0x88e4" name="RB_UNKNOWN_88E4" variants="A7XX-" usage="rp_blit">
- <!-- Value conditioned based on predicate, changed before blits -->
- <bitfield name="UNK0" pos="0" type="boolean"/>
+
+ <enum name="a7xx_blit_clear_mode">
+ <value value="0x0" name="CLEAR_MODE_SYSMEM"/>
+ <value value="0x1" name="CLEAR_MODE_GMEM"/>
+ </enum>
+ <reg32 offset="0x88e4" name="RB_CLEAR_TARGET" variants="A7XX-" usage="rp_blit">
+ <bitfield name="CLEAR_MODE" pos="0" type="a7xx_blit_clear_mode"/>
</reg32>
<enum name="a6xx_ccu_cache_size">
@@ -3871,7 +1702,7 @@ to upconvert to 32b float internally?
<value value="0x2" name="CCU_CACHE_SIZE_QUARTER"/>
<value value="0x3" name="CCU_CACHE_SIZE_EIGHTH"/>
</enum>
- <reg32 offset="0x88e5" name="RB_CCU_CNTL2" variants="A7XX-" usage="cmd">
+ <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX-" usage="cmd">
<bitfield name="DEPTH_OFFSET_HI" pos="0" type="hex"/>
<bitfield name="COLOR_OFFSET_HI" pos="2" type="hex"/>
<bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/>
@@ -3895,7 +1726,13 @@ to upconvert to 32b float internally?
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
<bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
</reg32>
- <reg32 offset="0x88f4" name="RB_UNKNOWN_88F4" low="0" high="2"/>
+
+ <reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit">
+ <bitfield name="UNK2" pos="2" type="boolean"/>
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="4" type="boolean"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="5" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="18" type="boolean"/>
+ </reg32>
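<!--
A sketch packing RB_VRS_CONFIG from the three fragment-shading-rate sources
named above (UNK2 left out); bit positions are taken straight from the
bitfields, the helper itself is illustrative:

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t rb_vrs_config(bool pipeline_fsr, bool attachment_fsr,
                                     bool primitive_fsr)
{
    return ((uint32_t)pipeline_fsr << 4) |
           ((uint32_t)attachment_fsr << 5) |
           ((uint32_t)primitive_fsr << 18);
}
-->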
<!-- Connected to VK_EXT_fragment_density_map? -->
<reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
<!-- 0x88f6-0x88ff invalid -->
@@ -3906,7 +1743,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
- <array offset="0x8903" name="RB_MRT_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
+ <array offset="0x8903" name="RB_COLOR_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
<reg64 offset="0" name="ADDR" type="waddress" align="64"/>
<reg32 offset="2" name="PITCH">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
@@ -3915,10 +1752,10 @@ to upconvert to 32b float internally?
</array>
<!-- 0x891b-0x8926 invalid -->
<doc>
- RB_SAMPLE_COUNT_ADDR register is used up to (and including) a730. After that
+ The RB_SAMPLE_COUNTER_BASE register is used up to (and including) a730. After that
the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT.
</doc>
- <reg64 offset="0x8927" name="RB_SAMPLE_COUNT_ADDR" type="waddress" align="16" usage="cmd"/>
+ <reg64 offset="0x8927" name="RB_SAMPLE_COUNTER_BASE" type="waddress" align="16" usage="cmd"/>
<!-- 0x8929-0x89ff invalid -->
<!-- TODO: there are some registers in the 0x8a00-0x8bff range -->
@@ -3932,10 +1769,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x8a20" name="RB_UNKNOWN_8A20" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0x8a30" name="RB_UNKNOWN_8A30" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
- <reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
+ <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8c01" name="RB_A2D_PIXEL_CNTL" low="0" high="31" usage="rp_blit"/>
- <bitset name="a6xx_2d_src_surf_info" inline="yes">
+ <bitset name="a6xx_a2d_src_texture_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
@@ -3954,7 +1791,7 @@ to upconvert to 32b float internally?
<bitfield name="MUTABLEEN" pos="29" type="boolean" variants="A7XX-"/>
</bitset>
- <bitset name="a6xx_2d_dst_surf_info" inline="yes">
+ <bitset name="a6xx_a2d_dest_buffer_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
@@ -3965,26 +1802,26 @@ to upconvert to 32b float internally?
</bitset>
<!-- 0x8c02-0x8c16 invalid -->
- <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_dst_surf_info" usage="rp_blit"/>
- <reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8c17" name="RB_A2D_DEST_BUFFER_INFO" type="a6xx_a2d_dest_buffer_info" usage="rp_blit"/>
+ <reg64 offset="0x8c18" name="RB_A2D_DEST_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1a" name="RB_A2D_DEST_BUFFER_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12/IYUV): -->
- <reg64 offset="0x8c1b" name="RB_2D_DST_PLANE1" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c1d" name="RB_2D_DST_PLANE_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
- <reg64 offset="0x8c1e" name="RB_2D_DST_PLANE2" type="waddress" align="64" usage="rp_blit"/>
+ <reg64 offset="0x8c1b" name="RB_A2D_DEST_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c1d" name="RB_A2D_DEST_BUFFER_PITCH_1" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c1e" name="RB_A2D_DEST_BUFFER_BASE_2" type="waddress" align="64" usage="rp_blit"/>
- <reg64 offset="0x8c20" name="RB_2D_DST_FLAGS" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c22" name="RB_2D_DST_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c20" name="RB_A2D_DEST_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12 with UBWC): -->
- <reg64 offset="0x8c23" name="RB_2D_DST_FLAGS_PLANE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c25" name="RB_2D_DST_FLAGS_PLANE_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x8c23" name="RB_A2D_DEST_FLAG_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
+ <reg32 offset="0x8c25" name="RB_A2D_DEST_FLAG_BUFFER_PITCH_1" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
<!-- TODO: 0x8c26-0x8c33 are all full 32-bit registers -->
<!-- unlike a5xx, these are per channel values rather than packed -->
- <reg32 offset="0x8c2c" name="RB_2D_SRC_SOLID_C0" usage="rp_blit"/>
- <reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
- <reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
- <reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
+ <reg32 offset="0x8c2c" name="RB_A2D_CLEAR_COLOR_DW0" usage="rp_blit"/>
+ <reg32 offset="0x8c2d" name="RB_A2D_CLEAR_COLOR_DW1" usage="rp_blit"/>
+ <reg32 offset="0x8c2e" name="RB_A2D_CLEAR_COLOR_DW2" usage="rp_blit"/>
+ <reg32 offset="0x8c2f" name="RB_A2D_CLEAR_COLOR_DW3" usage="rp_blit"/>
<reg32 offset="0x8c34" name="RB_UNKNOWN_8C34" variants="A7XX-" usage="cmd"/>
@@ -3996,7 +1833,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x8e04" name="RB_DBG_ECO_CNTL" usage="cmd"/> <!-- TODO: valid mask 0xfffffeff -->
<reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
<!-- 0x02080000 in GMEM, zero otherwise? -->
- <reg32 offset="0x8e06" name="RB_UNKNOWN_8E06" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8e06" name="RB_CCU_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A6XX">
<bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
@@ -4017,10 +1854,21 @@ to upconvert to 32b float internally?
<bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/>
<!-- TODO: valid mask 0xfffffc1f -->
</reg32>
+ <enum name="a7xx_concurrent_resolve_mode">
+ <value value="0x0" name="CONCURRENT_RESOLVE_MODE_DISABLED"/>
+ <value value="0x1" name="CONCURRENT_RESOLVE_MODE_1"/>
+ <value value="0x2" name="CONCURRENT_RESOLVE_MODE_2"/>
+ </enum>
+ <enum name="a7xx_concurrent_unresolve_mode">
+ <value value="0x0" name="CONCURRENT_UNRESOLVE_MODE_DISABLED"/>
+ <value value="0x1" name="CONCURRENT_UNRESOLVE_MODE_PARTIAL"/>
+ <value value="0x3" name="CONCURRENT_UNRESOLVE_MODE_FULL"/>
+ </enum>
<reg32 offset="0x8e07" name="RB_CCU_CNTL" usage="cmd" variants="A7XX-">
<bitfield name="GMEM_FAST_CLEAR_DISABLE" pos="0" type="boolean"/>
- <bitfield name="CONCURRENT_RESOLVE" pos="2" type="boolean"/>
- <!-- rest of the bits were moved to RB_CCU_CNTL2 -->
+ <bitfield name="CONCURRENT_RESOLVE_MODE" low="2" high="3" type="a7xx_concurrent_resolve_mode"/>
+ <bitfield name="CONCURRENT_UNRESOLVE_MODE" low="5" high="6" type="a7xx_concurrent_unresolve_mode"/>
+ <!-- rest of the bits were moved to RB_CCU_CACHE_CNTL -->
</reg32>
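<!--
A sketch packing the A7XX RB_CCU_CNTL fields defined above; mode values are
the a7xx_concurrent_resolve_mode / a7xx_concurrent_unresolve_mode enums and
the helper is illustrative:

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t rb_ccu_cntl_a7xx(bool gmem_fast_clear_disable,
                                        uint32_t resolve_mode,
                                        uint32_t unresolve_mode)
{
    return (uint32_t)gmem_fast_clear_disable |
           ((resolve_mode & 0x3) << 2) |
           ((unresolve_mode & 0x3) << 5);
}
-->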
<reg32 offset="0x8e08" name="RB_NC_MODE_CNTL">
<bitfield name="MODE" pos="0" type="boolean"/>
@@ -4046,9 +1894,9 @@ to upconvert to 32b float internally?
<reg32 offset="0x8e3d" name="RB_RB_SUB_BLOCK_SEL_CNTL_CD"/>
<!-- 0x8e3e-0x8e4f invalid -->
<!-- GMEM save/restore for preemption: -->
- <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE" pos="0" type="boolean"/>
+ <reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE" pos="0" type="boolean"/>
<!-- address for GMEM save/restore? -->
- <reg32 offset="0x8e51" name="RB_UNKNOWN_8E51" type="waddress" align="1"/>
+ <reg32 offset="0x8e51" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ADDR" type="waddress" align="1"/>
<!-- 0x8e53-0x8e7f invalid -->
<reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX-" usage="cmd"/>
<!-- 0x8e80-0x8e83 are valid -->
@@ -4069,38 +1917,38 @@ to upconvert to 32b float internally?
<bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
<bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
</bitset>
- <reg32 offset="0x9101" name="VPC_VS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9102" name="VPC_GS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9103" name="VPC_DS_CLIP_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9311" name="VPC_VS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9312" name="VPC_GS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9313" name="VPC_DS_CLIP_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <bitset name="a6xx_vpc_xs_layer_cntl" inline="yes">
+ <bitset name="a6xx_vpc_xs_siv_cntl" inline="yes">
<bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
<bitfield name="VIEWLOC" low="8" high="15" type="uint"/>
<bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9104" name="VPC_VS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9105" name="VPC_GS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9106" name="VPC_DS_LAYER_CNTL" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9314" name="VPC_VS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9315" name="VPC_GS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
- <reg32 offset="0x9316" name="VPC_DS_LAYER_CNTL_V2" type="a6xx_vpc_xs_layer_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
<reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
- <!-- this mirrors PC_RASTER_CNTL::DISCARD, although it seems it's unused -->
+ <!-- this mirrors VPC_RAST_STREAM_CNTL::DISCARD, although it seems to be unused -->
<bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
<bitfield name="UNK2" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9108" name="VPC_POLYGON_MODE" usage="rp_blit">
+ <reg32 offset="0x9108" name="VPC_RAST_CNTL" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <bitset name="a6xx_primitive_cntl_0" inline="yes">
+ <bitset name="a6xx_pc_cntl" inline="yes">
<bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
<bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
<bitfield name="D3D_VERTEX_ORDERING" pos="2" type="boolean">
@@ -4113,7 +1961,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK3" pos="3" type="boolean"/>
</bitset>
- <bitset name="a6xx_primitive_cntl_5" inline="yes">
+ <bitset name="a6xx_gs_param_0" inline="yes">
<doc>
geometry shader
</doc>
@@ -4125,7 +1973,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK18" pos="18"/>
</bitset>
- <bitset name="a6xx_multiview_cntl" inline="yes">
+ <bitset name="a6xx_stereo_rendering_cntl" inline="yes">
<bitfield name="ENABLE" pos="0" type="boolean"/>
<bitfield name="DISABLEMULTIPOS" pos="1" type="boolean">
<doc>
@@ -4139,10 +1987,10 @@ to upconvert to 32b float internally?
<bitfield name="VIEWS" low="2" high="6" type="uint"/>
</bitset>
- <reg32 offset="0x9109" name="VPC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910a" name="VPC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910b" name="VPC_MULTIVIEW_MASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910c" name="VPC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX-" usage="rp_blit"/>
<enum name="a6xx_varying_interp_mode">
<value value="0" name="INTERP_SMOOTH"/>
@@ -4159,11 +2007,11 @@ to upconvert to 32b float internally?
</enum>
<!-- 0x9109-0x91ff invalid -->
- <array offset="0x9200" name="VPC_VARYING_INTERP" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" usage="rp_blit">
<doc>Packed array of a6xx_varying_interp_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
- <array offset="0x9208" name="VPC_VARYING_PS_REPL" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE_0" stride="1" length="8" usage="rp_blit">
<doc>Packed array of a6xx_varying_ps_repl_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
@@ -4172,12 +2020,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
<reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
- <array offset="0x9212" name="VPC_VAR" stride="1" length="4" usage="rp_blit">
+ <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL_0" stride="1" length="4" usage="rp_blit">
<!-- one bit per varying component: -->
<reg32 offset="0" name="DISABLE"/>
</array>
- <reg32 offset="0x9216" name="VPC_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" usage="rp_blit">
<!--
Choose which DWORD to write to. There is an array of
(4 * 64) DWORDs, dumped in the devcoredump at
@@ -4198,7 +2046,7 @@ to upconvert to 32b float internally?
When EmitStreamVertex(N) happens, the HW goes to DWORD
64 * N and then "executes" the next 64 DWORDs.
- This field is auto-incremented when VPC_SO_PROG is
+ This field is auto-incremented when VPC_SO_MAPPING_PORT is
written to.
-->
<bitfield name="ADDR" low="0" high="7" type="hex"/>
@@ -4206,7 +2054,7 @@ to upconvert to 32b float internally?
<bitfield name="RESET" pos="16" type="boolean"/>
</reg32>
<!-- special register, write multiple times to load SO program (not readable) -->
- <reg32 offset="0x9217" name="VPC_SO_PROG" usage="rp_blit">
+ <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" usage="rp_blit">
<bitfield name="A_BUF" low="0" high="1" type="uint"/>
<bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
<bitfield name="A_EN" pos="11" type="boolean"/>
@@ -4215,7 +2063,7 @@ to upconvert to 32b float internally?
<bitfield name="B_EN" pos="23" type="boolean"/>
</reg32>
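<!--
A sketch of loading one streamout mapping entry per the docs above: set the
write pointer once, then each VPC_SO_MAPPING_PORT write programs a DWORD
and auto-increments the pointer. Only the A slot is packed here (the B slot
fields are elided by the hunk); the helpers are illustrative:

#include <stdint.h>

static inline uint32_t so_mapping_wptr(uint32_t dword_addr, int reset)
{
    return (dword_addr & 0xff) | ((uint32_t)(reset != 0) << 16);
}

static inline uint32_t so_mapping_port_a(uint32_t buf, uint32_t byte_off,
                                         int en)
{
    /* A_OFF holds a dword-aligned byte offset, stored shifted right by 2 */
    return (buf & 0x3) |
           (((byte_off >> 2) & 0x1ff) << 2) |
           ((uint32_t)(en != 0) << 11);
}
-->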
- <reg64 offset="0x9218" name="VPC_SO_STREAM_COUNTS" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" usage="cmd"/>
<array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
<reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
@@ -4225,14 +2073,14 @@ to upconvert to 32b float internally?
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
</array>
- <reg32 offset="0x9236" name="VPC_POINT_COORD_INVERT" usage="cmd">
+ <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" usage="cmd">
<bitfield name="INVERT" pos="0" type="boolean"/>
</reg32>
<!-- 0x9237-0x92ff invalid -->
<!-- always 0x0 ? -->
<reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
- <bitset name="a6xx_vpc_xs_pack" inline="yes">
+ <bitset name="a6xx_vpc_xs_cntl" inline="yes">
<doc>
num of varyings plus four for gl_Position (plus one if gl_PointSize)
plus # of transform-feedback (streamout) varyings if using the
@@ -4249,11 +2097,11 @@ to upconvert to 32b float internally?
</doc>
</bitfield>
</bitset>
- <reg32 offset="0x9301" name="VPC_VS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
- <reg32 offset="0x9302" name="VPC_GS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
- <reg32 offset="0x9303" name="VPC_DS_PACK" type="a6xx_vpc_xs_pack" usage="rp_blit"/>
+ <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9304" name="VPC_CNTL_0" usage="rp_blit">
+ <reg32 offset="0x9304" name="VPC_PS_CNTL" usage="rp_blit">
<bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
<!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
<bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
@@ -4272,7 +2120,7 @@ to upconvert to 32b float internally?
</bitfield>
</reg32>
- <reg32 offset="0x9305" name="VPC_SO_STREAM_CNTL" usage="rp_blit">
+ <reg32 offset="0x9305" name="VPC_SO_CNTL" usage="rp_blit">
<!--
It's offset by 1, and 0 means "disabled"
-->
@@ -4282,19 +2130,19 @@ to upconvert to 32b float internally?
<bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
</reg32>
- <reg32 offset="0x9306" name="VPC_SO_DISABLE" usage="rp_blit">
+ <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" usage="rp_blit">
<bitfield name="DISABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x9307" name="VPC_POLYGON_MODE2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" variants="A6XX-" usage="rp_blit"> <!-- A702 + A7xx -->
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9308" name="VPC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
<bitfield name="SIZE_GMEM" low="0" high="31"/>
</reg32>
- <reg32 offset="0x9309" name="VPC_ATTR_BUF_BASE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX-" usage="rp_blit">
<bitfield name="BASE_GMEM" low="0" high="31"/>
</reg32>
- <reg32 offset="0x9b09" name="PC_ATTR_BUF_SIZE_GMEM" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
<bitfield name="SIZE_GMEM" low="0" high="31"/>
</reg32>
@@ -4311,15 +2159,15 @@ to upconvert to 32b float internally?
<!-- TODO: regs from 0x9624-0x963a -->
<!-- 0x963b-0x97ff invalid -->
- <reg32 offset="0x9800" name="PC_TESS_NUM_VERTEX" low="0" high="5" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" usage="rp_blit"/>
<!-- always 0x0 ? -->
- <reg32 offset="0x9801" name="PC_HS_INPUT_SIZE" usage="rp_blit">
+ <reg32 offset="0x9801" name="PC_HS_PARAM_1" usage="rp_blit">
<bitfield name="SIZE" low="0" high="10" type="uint"/>
<bitfield name="UNK13" pos="13"/>
</reg32>
- <reg32 offset="0x9802" name="PC_TESS_CNTL" usage="rp_blit">
+ <reg32 offset="0x9802" name="PC_DS_PARAM" usage="rp_blit">
<bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
<bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
</reg32>
@@ -4334,7 +2182,7 @@ to upconvert to 32b float internally?
</reg32>
<!-- New in a6xx gen3+ -->
- <reg32 offset="0x9808" name="PC_SO_STREAM_CNTL" usage="rp_blit">
+ <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" usage="rp_blit">
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
</reg32>
@@ -4344,15 +2192,15 @@ to upconvert to 32b float internally?
<!-- 0x980b-0x983f invalid -->
<!-- 0x9840 - 0x9842 are not readable -->
- <reg32 offset="0x9840" name="PC_DRAW_CMD">
+ <reg32 offset="0x9840" name="PC_DRAW_INITIATOR">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0x9841" name="PC_DISPATCH_CMD">
+ <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0x9842" name="PC_EVENT_CMD">
+ <reg32 offset="0x9842" name="PC_EVENT_INITIATOR">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
@@ -4367,27 +2215,27 @@ to upconvert to 32b float internally?
<!-- 0x9843-0x997f invalid -->
- <reg32 offset="0x9981" name="PC_POLYGON_MODE" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9809" name="PC_POLYGON_MODE" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
</reg32>
- <reg32 offset="0x9980" name="PC_RASTER_CNTL" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" variants="A6XX" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
<bitfield name="DISCARD" pos="2" type="boolean"/>
</reg32>
- <!-- VPC_RASTER_CNTL -->
- <reg32 offset="0x9107" name="PC_RASTER_CNTL" variants="A7XX-" usage="rp_blit">
+ <!-- VPC_RAST_STREAM_CNTL -->
+ <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" variants="A7XX-" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
<bitfield name="DISCARD" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9317" name="PC_RASTER_CNTL_V2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" variants="A7XX-" usage="rp_blit">
<!-- which stream to send to GRAS -->
<bitfield name="STREAM" low="0" high="1" type="uint"/>
<!-- discard primitives before rasterization -->
@@ -4397,17 +2245,17 @@ to upconvert to 32b float internally?
<!-- Both are a750+.
Probably needed to correctly overlap execution of several draws.
-->
- <reg32 offset="0x9885" name="PC_TESS_PARAM_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
<!-- The blob adds a bit more space ({0x10, 0x20, 0x30, 0x40} bytes), but the meaning of
this additional space is not known.
-->
- <reg32 offset="0x9886" name="PC_TESS_FACTOR_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
<!-- 0x9982-0x9aff invalid -->
- <reg32 offset="0x9b00" name="PC_PRIMITIVE_CNTL_0" type="a6xx_primitive_cntl_0" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" usage="rp_blit"/>
- <bitset name="a6xx_xs_out_cntl" inline="yes">
+ <bitset name="a6xx_pc_xs_cntl" inline="yes">
<doc>
num of varyings plus four for gl_Position (plus one if gl_PointSize)
plus # of transform-feedback (streamout) varyings if using the
@@ -4417,19 +2265,19 @@ to upconvert to 32b float internally?
<bitfield name="PSIZE" pos="8" type="boolean"/>
<bitfield name="LAYER" pos="9" type="boolean"/>
<bitfield name="VIEW" pos="10" type="boolean"/>
- <!-- note: PC_VS_OUT_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <!-- note: PC_VS_CNTL doesn't have the PRIMITIVE_ID bit -->
<bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
<bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
<bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9b01" name="PC_VS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b02" name="PC_GS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
<!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
- <reg32 offset="0x9b03" name="PC_HS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b04" name="PC_DS_OUT_CNTL" type="a6xx_xs_out_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b05" name="PC_PRIMITIVE_CNTL_5" type="a6xx_primitive_cntl_5" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" usage="rp_blit"/>
<reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
<doc>
@@ -4438,9 +2286,9 @@ to upconvert to 32b float internally?
<bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
</reg32>
- <reg32 offset="0x9b07" name="PC_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
<!-- mask of enabled views, doesn't exist on A630 -->
- <reg32 offset="0x9b08" name="PC_MULTIVIEW_MASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" usage="rp_blit"/>
<!-- 0x9b09-0x9bff invalid -->
<reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
<!-- special register (but note first 8 bits can be written/read) -->
@@ -4451,31 +2299,31 @@ to upconvert to 32b float internally?
<!-- TODO: 0x9e00-0xa000 range incomplete -->
<reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
<reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg64 offset="0x9e04" name="PC_DRAW_INDX_BASE"/>
- <reg32 offset="0x9e06" name="PC_DRAW_FIRST_INDX" type="uint"/>
- <reg32 offset="0x9e07" name="PC_DRAW_MAX_INDICES" type="uint"/>
- <reg64 offset="0x9e08" name="PC_TESSFACTOR_ADDR" variants="A6XX" type="waddress" align="32" usage="cmd"/>
- <reg64 offset="0x9810" name="PC_TESSFACTOR_ADDR" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9e04" name="PC_DMA_BASE"/>
+ <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint"/>
+ <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint"/>
+ <reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
- <reg32 offset="0x9e0b" name="PC_DRAW_INITIATOR" type="vgt_draw_initiator_a4xx">
+ <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx">
<doc>
Possibly not really "initiating" the draw but the layout is similar
to VGT_DRAW_INITIATOR on older gens
</doc>
</reg32>
- <reg32 offset="0x9e0c" name="PC_DRAW_NUM_INSTANCES" type="uint"/>
- <reg32 offset="0x9e0d" name="PC_DRAW_NUM_INDICES" type="uint"/>
+ <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint"/>
+ <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint"/>
<!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
- <reg32 offset="0x9e11" name="PC_VSTREAM_CONTROL">
+ <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL">
<bitfield name="UNK0" low="0" high="15"/>
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
</reg32>
- <reg64 offset="0x9e12" name="PC_BIN_PRIM_STRM" type="waddress" align="32"/>
- <reg64 offset="0x9e14" name="PC_BIN_DRAW_STRM" type="waddress" align="32"/>
+ <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
+ <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
- <reg32 offset="0x9e1c" name="PC_VISIBILITY_OVERRIDE">
+ <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE">
<doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
<bitfield name="OVERRIDE" pos="0" type="boolean"/>
</reg32>
@@ -4488,18 +2336,18 @@ to upconvert to 32b float internally?
<!-- always 0x0 -->
<reg32 offset="0x9e72" name="PC_UNKNOWN_9E72" usage="cmd"/>
- <reg32 offset="0xa000" name="VFD_CONTROL_0" usage="rp_blit">
+ <reg32 offset="0xa000" name="VFD_CNTL_0" usage="rp_blit">
<bitfield name="FETCH_CNT" low="0" high="5" type="uint"/>
<bitfield name="DECODE_CNT" low="8" high="13" type="uint"/>
</reg32>
- <reg32 offset="0xa001" name="VFD_CONTROL_1" usage="rp_blit">
+ <reg32 offset="0xa001" name="VFD_CNTL_1" usage="rp_blit">
<bitfield name="REGID4VTX" low="0" high="7" type="a3xx_regid"/>
<bitfield name="REGID4INST" low="8" high="15" type="a3xx_regid"/>
<bitfield name="REGID4PRIMID" low="16" high="23" type="a3xx_regid"/>
<!-- only used for VS in non-multi-position-output case -->
<bitfield name="REGID4VIEWID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa002" name="VFD_CONTROL_2" usage="rp_blit">
+ <reg32 offset="0xa002" name="VFD_CNTL_2" usage="rp_blit">
<bitfield name="REGID_HSRELPATCHID" low="0" high="7" type="a3xx_regid">
<doc>
This is the ID of the current patch within the
@@ -4512,32 +2360,32 @@ to upconvert to 32b float internally?
</bitfield>
<bitfield name="REGID_INVOCATIONID" low="8" high="15" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa003" name="VFD_CONTROL_3" usage="rp_blit">
+ <reg32 offset="0xa003" name="VFD_CNTL_3" usage="rp_blit">
<bitfield name="REGID_DSPRIMID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="REGID_DSRELPATCHID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="REGID_TESSX" low="16" high="23" type="a3xx_regid"/>
<bitfield name="REGID_TESSY" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa004" name="VFD_CONTROL_4" usage="rp_blit">
+ <reg32 offset="0xa004" name="VFD_CNTL_4" usage="rp_blit">
<bitfield name="UNK0" low="0" high="7" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa005" name="VFD_CONTROL_5" usage="rp_blit">
+ <reg32 offset="0xa005" name="VFD_CNTL_5" usage="rp_blit">
<bitfield name="REGID_GSHEADER" low="0" high="7" type="a3xx_regid"/>
<bitfield name="UNK8" low="8" high="15" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa006" name="VFD_CONTROL_6" usage="rp_blit">
+ <reg32 offset="0xa006" name="VFD_CNTL_6" usage="rp_blit">
<!--
True if gl_PrimitiveID is read via the FS
-->
<bitfield name="PRIMID4PSEN" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0xa007" name="VFD_MODE_CNTL" usage="cmd">
+ <reg32 offset="0xa007" name="VFD_RENDER_MODE" usage="cmd">
<bitfield name="RENDER_MODE" low="0" high="2" type="a6xx_render_mode"/>
</reg32>
- <reg32 offset="0xa008" name="VFD_MULTIVIEW_CNTL" type="a6xx_multiview_cntl" usage="rp_blit"/>
- <reg32 offset="0xa009" name="VFD_ADD_OFFSET" usage="cmd">
+ <reg32 offset="0xa008" name="VFD_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
+ <reg32 offset="0xa009" name="VFD_MODE_CNTL" usage="cmd">
<!-- add VFD_INDEX_OFFSET to REGID4VTX -->
<bitfield name="VERTEX" pos="0" type="boolean"/>
<!-- add VFD_INSTANCE_START_OFFSET to REGID4INST -->
@@ -4546,14 +2394,14 @@ to upconvert to 32b float internally?
<reg32 offset="0xa00e" name="VFD_INDEX_OFFSET" usage="rp_blit"/>
<reg32 offset="0xa00f" name="VFD_INSTANCE_START_OFFSET" usage="rp_blit"/>
- <array offset="0xa010" name="VFD_FETCH" stride="4" length="32" usage="rp_blit">
+ <array offset="0xa010" name="VFD_VERTEX_BUFFER" stride="4" length="32" usage="rp_blit">
<reg64 offset="0x0" name="BASE" type="address" align="1"/>
<reg32 offset="0x2" name="SIZE" type="uint"/>
<reg32 offset="0x3" name="STRIDE" low="0" high="11" type="uint"/>
</array>
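<!--
A sketch packing one entry of the VFD_FETCH_INSTR array just below: which
VFD_VERTEX_BUFFER entry to fetch from (IDX), the byte offset into it, and
per-instance stepping; the helper is illustrative:

#include <stdint.h>

static inline uint32_t vfd_fetch_instr(uint32_t buf_idx, uint32_t byte_offset,
                                       int instanced)
{
    return (buf_idx & 0x1f) |
           ((byte_offset & 0xfff) << 5) |
           ((uint32_t)(instanced != 0) << 17);
}
-->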
- <array offset="0xa090" name="VFD_DECODE" stride="2" length="32" usage="rp_blit">
+ <array offset="0xa090" name="VFD_FETCH_INSTR" stride="2" length="32" usage="rp_blit">
<reg32 offset="0x0" name="INSTR">
- <!-- IDX and byte OFFSET into VFD_FETCH -->
+ <!-- IDX and byte OFFSET into VFD_VERTEX_BUFFER -->
<bitfield name="IDX" low="0" high="4" type="uint"/>
<bitfield name="OFFSET" low="5" high="16"/>
<bitfield name="INSTANCED" pos="17" type="boolean"/>
@@ -4573,7 +2421,7 @@ to upconvert to 32b float internally?
<reg32 offset="0xa0f8" name="VFD_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0xa600" name="VFD_UNKNOWN_A600" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
<array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="8" variants="A6XX"/>
@@ -4588,7 +2436,7 @@ to upconvert to 32b float internally?
<value value="1" name="THREAD128"/>
</enum>
- <bitset name="a6xx_sp_xs_ctrl_reg0" inline="yes">
+ <bitset name="a6xx_sp_xs_cntl_0" inline="yes">
<!-- if set to SINGLE, only use 1 concurrent wave on each SP -->
<bitfield name="THREADMODE" pos="0" type="a3xx_threadmode"/>
<!--
@@ -4620,7 +2468,7 @@ to upconvert to 32b float internally?
-->
<bitfield name="BINDLESS_TEX" pos="0" type="boolean"/>
<bitfield name="BINDLESS_SAMP" pos="1" type="boolean"/>
- <bitfield name="BINDLESS_IBO" pos="2" type="boolean"/>
+ <bitfield name="BINDLESS_UAV" pos="2" type="boolean"/>
<bitfield name="BINDLESS_UBO" pos="3" type="boolean"/>
<bitfield name="ENABLED" pos="8" type="boolean"/>
@@ -4630,17 +2478,17 @@ to upconvert to 32b float internally?
-->
<bitfield name="NTEX" low="9" high="16" type="uint"/>
<bitfield name="NSAMP" low="17" high="21" type="uint"/>
- <bitfield name="NIBO" low="22" high="28" type="uint"/>
+ <bitfield name="NUAV" low="22" high="28" type="uint"/>
</bitset>
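<!--
A sketch packing the per-stage config word whose bitset ends above
(SP_xS_CONFIG); counts are masked to the field widths shown, and the
BINDLESS_* bits are left out for brevity:

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t sp_xs_config(bool enabled, uint32_t ntex,
                                    uint32_t nsamp, uint32_t nuav)
{
    return ((uint32_t)enabled << 8) |
           ((ntex & 0xff) << 9) |
           ((nsamp & 0x1f) << 17) |
           ((nuav & 0x7f) << 22);
}
-->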
- <bitset name="a6xx_sp_xs_prim_cntl" inline="yes">
+ <bitset name="a6xx_sp_xs_output_cntl" inline="yes">
<!-- # of VS outputs including pos/psize -->
<bitfield name="OUT" low="0" high="5" type="uint"/>
<!-- FLAGS_REGID only for GS -->
<bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/>
</bitset>
- <reg32 offset="0xa800" name="SP_VS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa800" name="SP_VS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!--
This field actually controls all geometry stages. TCS, TES, and
GS must have the same mergedregs setting as VS.
@@ -4665,10 +2513,10 @@ to upconvert to 32b float internally?
</reg32>
<!-- bitmask of true/false conditions for VS brac.N instructions,
bit N corresponds to brac.N -->
- <reg32 offset="0xa801" name="SP_VS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa801" name="SP_VS_BOOLEAN_CF_MASK" type="hex"/>
<!-- # of VS outputs including pos/psize -->
- <reg32 offset="0xa802" name="SP_VS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa803" name="SP_VS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa802" name="SP_VS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa803" name="SP_VS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4678,12 +2526,12 @@ to upconvert to 32b float internally?
</array>
<!--
Starting with a5xx, position/psize outputs from shader end up in the
- SP_VS_OUT map, with highest OUTLOCn position. (Generally they are
+ SP_VS_OUTPUT map, with highest OUTLOCn position. (Generally they are
the last entries too, except when gl_PointCoord is used: the blob inserts
an extra varying after, but with a lower OUTLOC position.) If present,
psize is last, preceded by position.
-->
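<!--
A sketch of filling the SP_VS_VPC_DEST registers below: each register packs
four byte-sized OUTLOCn slots (OUTLOC2/OUTLOC3 are cut off by the hunk but
presumably sit at bits 16..23 and 24..31); the helper is illustrative:

#include <stdint.h>

static inline uint32_t sp_vs_vpc_dest(const uint8_t outloc[4])
{
    return (uint32_t)outloc[0] |
           ((uint32_t)outloc[1] << 8) |
           ((uint32_t)outloc[2] << 16) |
           ((uint32_t)outloc[3] << 24);
}
-->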
- <array offset="0xa813" name="SP_VS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa813" name="SP_VS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4752,7 +2600,7 @@ to upconvert to 32b float internally?
</bitfield>
</bitset>
- <bitset name="a6xx_sp_xs_pvt_mem_hw_stack_offset" inline="yes">
+ <bitset name="a6xx_sp_xs_pvt_mem_stack_offset" inline="yes">
<doc>
This seems to be the equivalent of HWSTACKOFFSET in
a3xx. The ldp/stp offset formula above isn't affected by
@@ -4763,18 +2611,18 @@ to upconvert to 32b float internally?
<bitfield name="OFFSET" low="0" high="18" shr="11"/>
</bitset>
- <reg32 offset="0xa81b" name="SP_VS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa81c" name="SP_VS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa81b" name="SP_VS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa81c" name="SP_VS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa81e" name="SP_VS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa81f" name="SP_VS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa821" name="SP_VS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa822" name="SP_VS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa822" name="SP_VS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa823" name="SP_VS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa824" name="SP_VS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa825" name="SP_VS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa82d" name="SP_VS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa824" name="SP_VS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa825" name="SP_VS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82d" name="SP_VS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa830" name="SP_HS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa830" name="SP_HS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
@@ -4782,32 +2630,32 @@ to upconvert to 32b float internally?
Total size of local storage in dwords divided by the wave size.
            The maximum value is 64. Since the wave size is always 64 for HS,
            the maximum size of local storage is:
- 64 (wavesize) * 64 (SP_HS_WAVE_INPUT_SIZE) * 4 = 16k
+ 64 (wavesize) * 64 (SP_HS_CNTL_1) * 4 = 16k
-->
- <reg32 offset="0xa831" name="SP_HS_WAVE_INPUT_SIZE" low="0" high="7" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa832" name="SP_HS_BRANCH_COND" type="hex" usage="rp_blit"/>
+ <reg32 offset="0xa831" name="SP_HS_CNTL_1" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa832" name="SP_HS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
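
As a sanity check on the 16k figure above, here is the same arithmetic spelled out as a tiny standalone C program (the constant names are ours, purely for illustration):

    #include <assert.h>

    /* SP_HS_CNTL_1 holds local storage in dwords divided by the wave
     * size; both its maximum and the HS wave size are 64. */
    enum { HS_WAVE_SIZE = 64, SP_HS_CNTL_1_MAX = 64 };

    int main(void)
    {
        unsigned max_local_bytes = HS_WAVE_SIZE * SP_HS_CNTL_1_MAX * 4;
        assert(max_local_bytes == 16 * 1024); /* = 16k, as stated above */
        return 0;
    }
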
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa833" name="SP_HS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa834" name="SP_HS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa833" name="SP_HS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa834" name="SP_HS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa836" name="SP_HS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa837" name="SP_HS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa837" name="SP_HS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa839" name="SP_HS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa83a" name="SP_HS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83a" name="SP_HS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa83b" name="SP_HS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa83c" name="SP_HS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa82f" name="SP_HS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa83c" name="SP_HS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa83d" name="SP_HS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa82f" name="SP_HS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa840" name="SP_DS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa840" name="SP_DS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
- <reg32 offset="0xa841" name="SP_DS_BRANCH_COND" type="hex"/>
+ <reg32 offset="0xa841" name="SP_DS_BOOLEAN_CF_MASK" type="hex"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa842" name="SP_DS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa843" name="SP_DS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa842" name="SP_DS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa843" name="SP_DS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4815,7 +2663,7 @@ to upconvert to 32b float internally?
<bitfield name="B_COMPMASK" low="24" high="27" type="hex"/>
</reg32>
</array>
- <array offset="0xa853" name="SP_DS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa853" name="SP_DS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4825,22 +2673,22 @@ to upconvert to 32b float internally?
</array>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa85b" name="SP_DS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa85c" name="SP_DS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa85b" name="SP_DS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa85c" name="SP_DS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa85e" name="SP_DS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa85f" name="SP_DS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa861" name="SP_DS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa862" name="SP_DS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa862" name="SP_DS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa863" name="SP_DS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa864" name="SP_DS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa865" name="SP_DS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa868" name="SP_DS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa864" name="SP_DS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa865" name="SP_DS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa868" name="SP_DS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa870" name="SP_GS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa870" name="SP_GS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<!-- There is no mergedregs bit, that comes from the VS. -->
<bitfield name="EARLYPREAMBLE" pos="20" type="boolean"/>
</reg32>
- <reg32 offset="0xa871" name="SP_GS_PRIM_SIZE" low="0" high="7" type="uint" usage="rp_blit">
+ <reg32 offset="0xa871" name="SP_GS_CNTL_1" low="0" high="7" type="uint" usage="rp_blit">
<doc>
Normally the size of the output of the last stage in
dwords. It should be programmed as follows:
@@ -4854,11 +2702,11 @@ to upconvert to 32b float internally?
doesn't matter in practice.
</doc>
</reg32>
- <reg32 offset="0xa872" name="SP_GS_BRANCH_COND" type="hex" usage="rp_blit"/>
+ <reg32 offset="0xa872" name="SP_GS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa873" name="SP_GS_PRIMITIVE_CNTL" type="a6xx_sp_xs_prim_cntl" usage="rp_blit"/>
- <array offset="0xa874" name="SP_GS_OUT" stride="1" length="16" usage="rp_blit">
+ <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <array offset="0xa874" name="SP_GS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="A_COMPMASK" low="8" high="11" type="hex"/>
@@ -4867,7 +2715,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <array offset="0xa884" name="SP_GS_VPC_DST" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa884" name="SP_GS_VPC_DEST" stride="1" length="8" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="OUTLOC0" low="0" high="7" type="uint"/>
<bitfield name="OUTLOC1" low="8" high="15" type="uint"/>
@@ -4877,29 +2725,29 @@ to upconvert to 32b float internally?
</array>
<!-- TODO: exact same layout as 0xa81b-0xa825 -->
- <reg32 offset="0xa88c" name="SP_GS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa88d" name="SP_GS_OBJ_START" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa88c" name="SP_GS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa88d" name="SP_GS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa88f" name="SP_GS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa890" name="SP_GS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
+ <reg64 offset="0xa890" name="SP_GS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa892" name="SP_GS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa893" name="SP_GS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa893" name="SP_GS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa894" name="SP_GS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xa895" name="SP_GS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
- <reg32 offset="0xa896" name="SP_GS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa899" name="SP_GS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
-
- <reg64 offset="0xa8a0" name="SP_VS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a2" name="SP_HS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a4" name="SP_DS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a6" name="SP_GS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa8a8" name="SP_VS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8aa" name="SP_HS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8ac" name="SP_DS_TEX_CONST" type="address" align="64" usage="cmd"/>
- <reg64 offset="0xa8ae" name="SP_GS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg32 offset="0xa895" name="SP_GS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa896" name="SP_GS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa899" name="SP_GS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
+
+ <reg64 offset="0xa8a0" name="SP_VS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a2" name="SP_HS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a4" name="SP_DS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a6" name="SP_GS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa8a8" name="SP_VS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8aa" name="SP_HS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ac" name="SP_DS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa8ae" name="SP_GS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
<!-- TODO: 4 unknown bool registers 0xa8c0-0xa8c3 -->
- <reg32 offset="0xa980" name="SP_FS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="rp_blit">
+ <reg32 offset="0xa980" name="SP_PS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
<bitfield name="UNK21" pos="21" type="boolean"/>
<bitfield name="VARYING" pos="22" type="boolean"/>
@@ -4909,8 +2757,7 @@ to upconvert to 32b float internally?
fine derivatives and quad subgroup ops.
</doc>
</bitfield>
- <!-- note: vk blob uses bit24 -->
- <bitfield name="UNK24" pos="24" type="boolean"/>
+ <bitfield name="INOUTREGOVERLAP" pos="24" type="boolean"/>
<bitfield name="UNK25" pos="25" type="boolean"/>
<bitfield name="PIXLODENABLE" pos="26" type="boolean">
<doc>
@@ -4923,12 +2770,12 @@ to upconvert to 32b float internally?
<bitfield name="EARLYPREAMBLE" pos="28" type="boolean"/>
<bitfield name="MERGEDREGS" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="0xa981" name="SP_FS_BRANCH_COND" type="hex"/>
- <reg32 offset="0xa982" name="SP_FS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="rp_blit"/>
- <reg64 offset="0xa983" name="SP_FS_OBJ_START" type="address" align="32" usage="rp_blit"/>
- <reg32 offset="0xa985" name="SP_FS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
- <reg64 offset="0xa986" name="SP_FS_PVT_MEM_ADDR" type="waddress" align="32" usage="rp_blit"/>
- <reg32 offset="0xa988" name="SP_FS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
+ <reg32 offset="0xa981" name="SP_PS_BOOLEAN_CF_MASK" type="hex"/>
+ <reg32 offset="0xa982" name="SP_PS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
+ <reg64 offset="0xa983" name="SP_PS_BASE" type="address" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa985" name="SP_PS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
+ <reg64 offset="0xa986" name="SP_PS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
+ <reg32 offset="0xa988" name="SP_PS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
<reg32 offset="0xa989" name="SP_BLEND_CNTL" usage="rp_blit">
<!-- per-mrt enable bit -->
@@ -4948,7 +2795,7 @@ to upconvert to 32b float internally?
<bitfield name="SRGB_MRT6" pos="6" type="boolean"/>
<bitfield name="SRGB_MRT7" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0xa98b" name="SP_FS_RENDER_COMPONENTS" usage="rp_blit">
+ <reg32 offset="0xa98b" name="SP_PS_OUTPUT_MASK" usage="rp_blit">
<bitfield name="RT0" low="0" high="3"/>
<bitfield name="RT1" low="4" high="7"/>
<bitfield name="RT2" low="8" high="11"/>
@@ -4958,17 +2805,17 @@ to upconvert to 32b float internally?
<bitfield name="RT6" low="24" high="27"/>
<bitfield name="RT7" low="28" high="31"/>
</reg32>
- <reg32 offset="0xa98c" name="SP_FS_OUTPUT_CNTL0" usage="rp_blit">
+ <reg32 offset="0xa98c" name="SP_PS_OUTPUT_CNTL" usage="rp_blit">
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="0" type="boolean"/>
<bitfield name="DEPTH_REGID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPMASK_REGID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="STENCILREF_REGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa98d" name="SP_FS_OUTPUT_CNTL1" usage="rp_blit">
+ <reg32 offset="0xa98d" name="SP_PS_MRT_CNTL" usage="rp_blit">
<bitfield name="MRT" low="0" high="3" type="uint"/>
</reg32>
- <array offset="0xa98e" name="SP_FS_OUTPUT" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa98e" name="SP_PS_OUTPUT" stride="1" length="8" usage="rp_blit">
<doc>per MRT</doc>
<reg32 offset="0x0" name="REG">
<bitfield name="REGID" low="0" high="7" type="a3xx_regid"/>
@@ -4976,7 +2823,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <array offset="0xa996" name="SP_FS_MRT" stride="1" length="8" usage="rp_blit">
+ <array offset="0xa996" name="SP_PS_MRT" stride="1" length="8" usage="rp_blit">
<reg32 offset="0" name="REG">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="COLOR_SINT" pos="8" type="boolean"/>
@@ -4985,7 +2832,7 @@ to upconvert to 32b float internally?
</reg32>
</array>
- <reg32 offset="0xa99e" name="SP_FS_PREFETCH_CNTL" usage="rp_blit">
+ <reg32 offset="0xa99e" name="SP_PS_INITIAL_TEX_LOAD_CNTL" usage="rp_blit">
<bitfield name="COUNT" low="0" high="2" type="uint"/>
<bitfield name="IJ_WRITE_DISABLE" pos="3" type="boolean"/>
<doc>
@@ -5002,7 +2849,7 @@ to upconvert to 32b float internally?
<!-- Blob never uses it -->
<bitfield name="CONSTSLOTID4COORD" low="16" high="24" type="uint" variants="A7XX-"/>
</reg32>
- <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A6XX" usage="rp_blit">
+ <array offset="0xa99f" name="SP_PS_INITIAL_TEX_LOAD" stride="1" length="4" variants="A6XX" usage="rp_blit">
<reg32 offset="0" name="CMD" variants="A6XX">
<bitfield name="SRC" low="0" high="6" type="uint"/>
<bitfield name="SAMP_ID" low="7" high="10" type="uint"/>
@@ -5016,7 +2863,7 @@ to upconvert to 32b float internally?
<bitfield name="CMD" low="29" high="31" type="a6xx_tex_prefetch_cmd"/>
</reg32>
</array>
- <array offset="0xa99f" name="SP_FS_PREFETCH" stride="1" length="4" variants="A7XX-" usage="rp_blit">
+ <array offset="0xa99f" name="SP_PS_INITIAL_TEX_LOAD" stride="1" length="4" variants="A7XX-" usage="rp_blit">
<reg32 offset="0" name="CMD" variants="A7XX-">
<bitfield name="SRC" low="0" high="6" type="uint"/>
<bitfield name="SAMP_ID" low="7" high="9" type="uint"/>
@@ -5028,22 +2875,23 @@ to upconvert to 32b float internally?
<bitfield name="CMD" low="26" high="29" type="a6xx_tex_prefetch_cmd"/>
</reg32>
</array>
- <array offset="0xa9a3" name="SP_FS_BINDLESS_PREFETCH" stride="1" length="4" usage="rp_blit">
+ <array offset="0xa9a3" name="SP_PS_INITIAL_TEX_INDEX" stride="1" length="4" usage="rp_blit">
<reg32 offset="0" name="CMD">
<bitfield name="SAMP_ID" low="0" high="15" type="uint"/>
<bitfield name="TEX_ID" low="16" high="31" type="uint"/>
</reg32>
</array>
- <reg32 offset="0xa9a7" name="SP_FS_TEX_COUNT" low="0" high="7" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xa9a7" name="SP_PS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa9a8" name="SP_UNKNOWN_A9A8" low="0" high="16" usage="cmd"/> <!-- always 0x0 ? -->
- <reg32 offset="0xa9a9" name="SP_FS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa9a9" name="SP_PS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa9ab" name="SP_PS_UNKNOWN_A9AB" variants="A7XX-" usage="cmd"/>
<!-- TODO: unknown bool register at 0xa9aa, likely same as 0xa8c0-0xa8c3 but for FS -->
- <reg32 offset="0xa9b0" name="SP_CS_CTRL_REG0" type="a6xx_sp_xs_ctrl_reg0" usage="cmd">
+ <reg32 offset="0xa9b0" name="SP_CS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="cmd">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
        <!-- seems to make SP use fewer concurrent threads when possible? -->
<bitfield name="UNK21" pos="21" type="boolean"/>
@@ -5053,8 +2901,15 @@ to upconvert to 32b float internally?
<bitfield name="MERGEDREGS" pos="31" type="boolean"/>
</reg32>
+ <enum name="a6xx_const_ram_mode">
+ <value value="0x0" name="CONSTLEN_128"/>
+ <value value="0x1" name="CONSTLEN_192"/>
+ <value value="0x2" name="CONSTLEN_256"/>
+ <value value="0x3" name="CONSTLEN_512"/> <!-- a7xx only -->
+ </enum>
+
<!-- set for compute shaders -->
- <reg32 offset="0xa9b1" name="SP_CS_UNKNOWN_A9B1" usage="cmd">
+ <reg32 offset="0xa9b1" name="SP_CS_CNTL_1" usage="cmd">
<bitfield name="SHARED_SIZE" low="0" high="4" type="uint">
<doc>
If 0 - all 32k of shared storage is enabled, otherwise
@@ -5065,32 +2920,36 @@ to upconvert to 32b float internally?
always return 0)
</doc>
</bitfield>
- <bitfield name="UNK5" pos="5" type="boolean"/>
- <!-- always 1 ? -->
- <bitfield name="UNK6" pos="6" type="boolean"/>
+ <bitfield name="CONSTANTRAMMODE" low="5" high="6" type="a6xx_const_ram_mode">
+ <doc>
+ This defines the split between consts and local
+ memory in the Local Buffer. The programmed value
+ must be at least the actual CONSTLEN.
+ </doc>
+ </bitfield>
</reg32>
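
Taken together, SHARED_SIZE (bits 0..4) and CONSTANTRAMMODE (bits 5..6) pack as in the following sketch. The helper is hypothetical, written only to make the field layout concrete; real drivers generate equivalents from this XML:

    #include <stdint.h>

    enum a6xx_const_ram_mode {
        CONSTLEN_128 = 0x0,
        CONSTLEN_192 = 0x1,
        CONSTLEN_256 = 0x2,
        CONSTLEN_512 = 0x3, /* a7xx only */
    };

    static uint32_t pack_sp_cs_cntl_1(uint32_t shared_size_1k_chunks,
                                      enum a6xx_const_ram_mode mode)
    {
        return (shared_size_1k_chunks & 0x1f) | /* SHARED_SIZE, bits 0..4 */
               ((uint32_t)mode << 5);           /* CONSTANTRAMMODE, bits 5..6 */
    }

Per the doc above, the chosen mode must cover at least the shader's actual CONSTLEN, so a driver would round CONSTLEN up to the next of 128/192/256 (or 512 on a7xx).
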
- <reg32 offset="0xa9b2" name="SP_CS_BRANCH_COND" type="hex" usage="cmd"/>
- <reg32 offset="0xa9b3" name="SP_CS_OBJ_FIRST_EXEC_OFFSET" type="uint" usage="cmd"/>
- <reg64 offset="0xa9b4" name="SP_CS_OBJ_START" type="address" align="32" usage="cmd"/>
+ <reg32 offset="0xa9b2" name="SP_CS_BOOLEAN_CF_MASK" type="hex" usage="cmd"/>
+ <reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/>
+ <reg64 offset="0xa9b4" name="SP_CS_BASE" type="address" align="32" usage="cmd"/>
<reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
- <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_ADDR" align="32" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" align="32" usage="cmd"/>
<reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
- <reg32 offset="0xa9ba" name="SP_CS_TEX_COUNT" low="0" high="7" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9ba" name="SP_CS_TSIZE" low="0" high="7" type="uint" usage="cmd"/>
<reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
- <reg32 offset="0xa9bc" name="SP_CS_INSTRLEN" low="0" high="27" type="uint" usage="cmd"/>
- <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_HW_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_hw_stack_offset" usage="cmd"/>
+ <reg32 offset="0xa9bc" name="SP_CS_INSTR_SIZE" low="0" high="27" type="uint" usage="cmd"/>
+ <reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="cmd"/>
<reg32 offset="0xa9be" name="SP_CS_UNKNOWN_A9BE" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xa9c5" name="SP_CS_VGPR_CONFIG" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c5" name="SP_CS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
- <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_0 -->
- <reg32 offset="0xa9c2" name="SP_CS_CNTL_0" usage="cmd">
+ <!-- new in a6xx gen4, matches SP_CS_CONST_CONFIG_0 -->
+ <reg32 offset="0xa9c2" name="SP_CS_WIE_CNTL_0" usage="cmd">
<bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="WGSIZECONSTID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <!-- new in a6xx gen4, matches HLSQ_CS_CNTL_1 -->
- <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A6XX" usage="cmd">
+ <!-- new in a6xx gen4, matches SP_CS_WGE_CNTL -->
+ <reg32 offset="0xa9c3" name="SP_CS_WIE_CNTL_1" variants="A6XX" usage="cmd">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- a650 has 6 "SP cores" (but 3 "SP"). this makes it use only
@@ -5102,7 +2961,18 @@ to upconvert to 32b float internally?
<bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
</reg32>
- <reg32 offset="0xa9c3" name="SP_CS_CNTL_1" variants="A7XX-" usage="cmd">
+ <enum name="a7xx_workitem_rast_order">
+ <value value="0x0" name="WORKITEMRASTORDER_LINEAR"/>
+ <doc>
+ This is a fixed tiling, with 4x4 invocation outer tiles
+ containing 2x2 invocation inner tiles. The intent is to
+ improve cache locality with textures and images accessed
+ using gl_LocalInvocationID.
+ </doc>
+ <value value="0x1" name="WORKITEMRASTORDER_TILED"/>
+ </enum>
+
+ <reg32 offset="0xa9c3" name="SP_CS_WIE_CNTL_1" variants="A7XX-" usage="cmd">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- Must match SP_CS_CTRL -->
@@ -5110,18 +2980,16 @@ to upconvert to 32b float internally?
<!-- 1 thread per wave (would hang if THREAD128 is also set) -->
<bitfield name="THREADSIZE_SCALAR" pos="9" type="boolean"/>
- <!-- Affects getone. If enabled, getone sometimes executed 1? less times
- than there are subgroups.
- -->
- <bitfield name="UNK15" pos="15" type="boolean"/>
+ <doc>How invocations/fibers within a workgroup are tiled.</doc>
+ <bitfield name="WORKITEMRASTORDER" pos="15" type="a7xx_workitem_rast_order"/>
</reg32>
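
To make the WORKITEMRASTORDER_TILED description concrete, the sketch below maps a linear invocation index onto (x, y) under a 4x4 outer / 2x2 inner tiling. Only the tile dimensions come from the doc text; the traversal order of the tiles themselves is an assumption for illustration:

    #include <stdio.h>

    static void tiled_to_xy(unsigned i, unsigned width_in_outer_tiles,
                            unsigned *x, unsigned *y)
    {
        unsigned pos   = i % 4;       /* position within a 2x2 inner tile */
        unsigned inner = (i / 4) % 4; /* which inner tile within the 4x4  */
        unsigned outer = i / 16;      /* which 4x4 outer tile             */

        *x = (outer % width_in_outer_tiles) * 4 + (inner % 2) * 2 + (pos % 2);
        *y = (outer / width_in_outer_tiles) * 4 + (inner / 2) * 2 + (pos / 2);
    }

    int main(void)
    {
        for (unsigned i = 0; i < 16; i++) {
            unsigned x, y;
            tiled_to_xy(i, 1, &x, &y);
            printf("invocation %2u -> (%u, %u)\n", i, x, y);
        }
        return 0;
    }
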
<!-- TODO: two 64kb aligned addresses at a9d0/a9d2 -->
- <reg64 offset="0xa9e0" name="SP_FS_TEX_SAMP" type="address" align="16" usage="rp_blit"/>
- <reg64 offset="0xa9e2" name="SP_CS_TEX_SAMP" type="address" align="16" usage="cmd"/>
- <reg64 offset="0xa9e4" name="SP_FS_TEX_CONST" type="address" align="64" usage="rp_blit"/>
- <reg64 offset="0xa9e6" name="SP_CS_TEX_CONST" type="address" align="64" usage="cmd"/>
+ <reg64 offset="0xa9e0" name="SP_PS_SAMPLER_BASE" type="address" align="16" usage="rp_blit"/>
+ <reg64 offset="0xa9e2" name="SP_CS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
+ <reg64 offset="0xa9e4" name="SP_PS_TEXMEMOBJ_BASE" type="address" align="64" usage="rp_blit"/>
+ <reg64 offset="0xa9e6" name="SP_CS_TEXMEMOBJ_BASE" type="address" align="64" usage="cmd"/>
<enum name="a6xx_bindless_descriptor_size">
<doc>
@@ -5146,18 +3014,19 @@ to upconvert to 32b float internally?
</array>
<!--
- IBO state for compute shader:
+ UAV state for compute shader:
-->
- <reg64 offset="0xa9f2" name="SP_CS_IBO" type="address" align="16"/>
- <reg32 offset="0xaa00" name="SP_CS_IBO_COUNT" low="0" high="6" type="uint"/>
+ <reg64 offset="0xa9f2" name="SP_CS_UAV_BASE" type="address" align="16" variants="A6XX"/>
+ <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX"/>
+ <reg32 offset="0xaa00" name="SP_CS_USIZE" low="0" high="6" type="uint"/>
<!-- Correlated with avgs/uvgs usage in FS -->
- <reg32 offset="0xaa01" name="SP_FS_VGPR_CONFIG" type="uint" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xaa01" name="SP_PS_VGS_CNTL" type="uint" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xaa02" name="SP_PS_ALIASED_COMPONENTS_CONTROL" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xaa02" name="SP_PS_OUTPUT_CONST_CNTL" variants="A7XX-" usage="cmd">
<bitfield name="ENABLED" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0xaa03" name="SP_PS_ALIASED_COMPONENTS" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xaa03" name="SP_PS_OUTPUT_CONST_MASK" variants="A7XX-" usage="cmd">
<doc>
Specify for which components the output color should be read
from alias, e.g. for:
@@ -5167,7 +3036,7 @@ to upconvert to 32b float internally?
alias.1.b32.0 r1.x, c4.x
alias.1.b32.0 r0.x, c0.x
- the SP_PS_ALIASED_COMPONENTS would be 0x00001111
+ the SP_PS_OUTPUT_CONST_MASK would be 0x00001111
</doc>
<bitfield name="RT0" low="0" high="3"/>
@@ -5193,7 +3062,7 @@ to upconvert to 32b float internally?
<value value="0x2" name="ISAMMODE_GL"/>
</enum>
- <reg32 offset="0xab00" name="SP_MODE_CONTROL" usage="rp_blit">
+ <reg32 offset="0xab00" name="SP_MODE_CNTL" usage="rp_blit">
<!--
When set, half register loads from the constant file will
load a 32-bit value (so hc0.y loads the same value as c0.y)
@@ -5210,16 +3079,16 @@ to upconvert to 32b float internally?
<reg32 offset="0xab01" name="SP_UNKNOWN_AB01" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xab02" name="SP_UNKNOWN_AB02" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xab04" name="SP_FS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
- <reg32 offset="0xab05" name="SP_FS_INSTRLEN" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xab04" name="SP_PS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
+ <reg32 offset="0xab05" name="SP_PS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
- <array offset="0xab10" name="SP_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
+ <array offset="0xab10" name="SP_GFX_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A6XX">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
<bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
</reg64>
</array>
- <array offset="0xab0a" name="SP_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
+ <array offset="0xab0a" name="SP_GFX_BINDLESS_BASE" stride="2" length="8" variants="A7XX-" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A7XX-">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
<bitfield name="ADDR" low="2" high="63" shr="2" type="address"/>
@@ -5227,15 +3096,15 @@ to upconvert to 32b float internally?
</array>
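
The DESCRIPTOR word above packs DESC_SIZE into bits 0..1 and the address, pre-shifted right by two (the XML's shr="2"), into bits 2..63. A minimal encoding sketch, ours for illustration only:

    #include <stdint.h>

    static uint64_t pack_bindless_base(uint64_t addr, uint32_t desc_size)
    {
        /* ADDR: (addr >> 2) stored at bits 2..63; DESC_SIZE: bits 0..1.
         * addr is expected to be at least 16-byte aligned. */
        return ((addr >> 2) << 2) | (desc_size & 0x3);
    }
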
<!--
- Combined IBO state for 3d pipe, used for Image and SSBO write/atomic
- instructions VS/HS/DS/GS/FS. See SP_CS_IBO_* for compute shaders.
+ Combined UAV state for 3d pipe, used for Image and SSBO write/atomic
+ instructions VS/HS/DS/GS/FS. See SP_CS_UAV_BASE_* for compute shaders.
-->
- <reg64 offset="0xab1a" name="SP_IBO" type="address" align="16" usage="cmd"/>
- <reg32 offset="0xab20" name="SP_IBO_COUNT" low="0" high="6" type="uint" usage="cmd"/>
+ <reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" usage="cmd"/>
<reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
- <bitset name="a6xx_sp_2d_dst_format" inline="yes">
+ <bitset name="a6xx_sp_a2d_output_info" inline="yes">
<bitfield name="NORM" pos="0" type="boolean"/>
<bitfield name="SINT" pos="1" type="boolean"/>
<bitfield name="UINT" pos="2" type="boolean"/>
@@ -5248,8 +3117,8 @@ to upconvert to 32b float internally?
<bitfield name="MASK" low="12" high="15"/>
</bitset>
- <reg32 offset="0xacc0" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xa9bf" name="SP_2D_DST_FORMAT" type="a6xx_sp_2d_dst_format" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xacc0" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xa9bf" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="cmd"/>
<reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
@@ -5257,16 +3126,16 @@ to upconvert to 32b float internally?
<!-- TODO: valid bits 0x3c3f, see kernel -->
</reg32>
<reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="cmd"/>
- <reg32 offset="0xae04" name="SP_FLOAT_CNTL" usage="cmd">
+ <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="cmd">
<bitfield name="F16_NO_INF" pos="3" type="boolean"/>
</reg32>
<reg32 offset="0xae06" name="SP_UNKNOWN_AE06" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae08" name="SP_UNKNOWN_AE08" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae09" name="SP_UNKNOWN_AE09" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0a" name="SP_UNKNOWN_AE0A" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0f" name="SP_PERFCTR_ENABLE" usage="cmd">
+ <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="cmd">
<!-- some perfcntrs are affected by a per-stage enable bit
(PERF_SP_ALU_WORKING_CYCLES for example)
TODO: verify position of HS/DS/GS bits -->
@@ -5281,7 +3150,7 @@ to upconvert to 32b float internally?
<array offset="0xae60" name="SP_PERFCTR_HLSQ_SEL" stride="1" length="6" variants="A7XX-"/>
<reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae6c" name="SP_UNKNOWN_AE6C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
<bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
<bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
@@ -5301,33 +3170,44 @@ to upconvert to 32b float internally?
"a6xx_sp_ps_tp_cluster" but this actually specifies the border
color base for compute shaders.
-->
- <reg64 offset="0xb180" name="SP_PS_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
+ <reg64 offset="0xb180" name="TPL1_CS_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
<reg32 offset="0xb182" name="SP_UNKNOWN_B182" low="0" high="2" usage="cmd"/>
<reg32 offset="0xb183" name="SP_UNKNOWN_B183" low="0" high="23" usage="cmd"/>
<reg32 offset="0xb190" name="SP_UNKNOWN_B190"/>
<reg32 offset="0xb191" name="SP_UNKNOWN_B191"/>
- <!-- could be all the stuff below here is actually TPL1?? -->
-
- <reg32 offset="0xb300" name="SP_TP_RAS_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0xb300" name="TPL1_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" low="2" high="3"/>
</reg32>
- <reg32 offset="0xb301" name="SP_TP_DEST_MSAA_CNTL" usage="rp_blit">
+ <reg32 offset="0xb301" name="TPL1_DEST_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
</reg32>
<!-- looks to work in the same way as a5xx: -->
- <reg64 offset="0xb302" name="SP_TP_BORDER_COLOR_BASE_ADDR" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb304" name="SP_TP_SAMPLE_CONFIG" type="a6xx_sample_config" usage="rp_blit"/>
- <reg32 offset="0xb305" name="SP_TP_SAMPLE_LOCATION_0" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0xb306" name="SP_TP_SAMPLE_LOCATION_1" type="a6xx_sample_locations" usage="rp_blit"/>
- <reg32 offset="0xb307" name="SP_TP_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0xb309" name="SP_TP_MODE_CNTL" usage="cmd">
+ <reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
+ <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+
+ <enum name="a6xx_coord_round">
+ <value value="0" name="COORD_TRUNCATE"/>
+ <value value="1" name="COORD_ROUND_NEAREST_EVEN"/>
+ </enum>
+
+ <enum name="a6xx_nearest_mode">
+ <value value="0" name="ROUND_CLAMP_TRUNCATE"/>
+ <value value="1" name="CLAMP_ROUND_TRUNCATE"/>
+ </enum>
+
+ <reg32 offset="0xb309" name="TPL1_MODE_CNTL" usage="cmd">
<bitfield name="ISAMMODE" low="0" high="1" type="a6xx_isam_mode"/>
- <bitfield name="UNK3" low="2" high="7"/>
+ <bitfield name="TEXCOORDROUNDMODE" pos="2" type="a6xx_coord_round"/>
+ <bitfield name="NEARESTMIPSNAP" pos="5" type="a6xx_nearest_mode"/>
+ <bitfield name="DESTDATATYPEOVERRIDE" pos="7" type="boolean"/>
</reg32>
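
The two small enums above only name rounding behaviours; as a rough analogy in C (our reading, not a statement about exactly where the hardware applies it), COORD_TRUNCATE corresponds to trunc() and COORD_ROUND_NEAREST_EVEN to the default round-half-to-even mode of nearbyint():

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double c[] = { 2.5, 3.5, -1.25 };
        for (int i = 0; i < 3; i++)
            printf("%6.2f: truncate=%.0f nearest-even=%.0f\n",
                   c[i], trunc(c[i]), nearbyint(c[i]));
        return 0; /* 2.5 -> 2, 3.5 -> 4 under round-to-nearest-even */
    }
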
<reg32 offset="0xb310" name="SP_UNKNOWN_B310" variants="A7XX-" usage="cmd"/>
@@ -5336,42 +3216,45 @@ to upconvert to 32b float internally?
badly named or the functionality moved in a6xx. But downstream kernel
calls this "a6xx_sp_ps_tp_2d_cluster"
-->
- <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb4c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A6XX" usage="rp_blit">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
- <reg64 offset="0xb4c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4c4" name="SP_PS_2D_SRC_PITCH" variants="A6XX" usage="rp_blit">
+ <reg64 offset="0xb4c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A6XX" usage="rp_blit">
<bitfield name="UNK0" low="0" high="8"/>
<bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
</reg32>
- <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
+ <reg32 offset="0xb2c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
- <reg64 offset="0xb2c2" name="SP_PS_2D_SRC" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c4" name="SP_PS_2D_SRC_PITCH" variants="A7XX">
- <bitfield name="UNK0" low="0" high="8"/>
- <bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
+ <reg64 offset="0xb2c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX">
+ <!--
+                        Bits 3..9 must be zero unless 'TPL1_A2D_BLT_CNTL::TYPE'
+                        is A6XX_TEX_IMG_BUFFER, which allows for lower alignment.
+ -->
+ <bitfield name="PITCH" low="3" high="23" type="uint"/>
</reg32>
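
Since PITCH occupies bits 3..23 with no implicit shift, register bits 3..9 being zero translates to the low seven bits of the byte pitch being zero, i.e. a 128-byte multiple for ordinary textures. That reading is ours; a sketch of the resulting validity check:

    #include <stdbool.h>
    #include <stdint.h>

    static bool a2d_src_pitch_valid(uint32_t pitch_bytes, bool is_img_buffer)
    {
        /* image buffers are exempt and may use lower alignment */
        return is_img_buffer || (pitch_bytes & 0x7f) == 0;
    }
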
<!-- planes for NV12, etc. (TODO: not tested) -->
- <reg64 offset="0xb4c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A6XX"/>
- <reg32 offset="0xb4c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
- <reg64 offset="0xb4c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A6XX"/>
+ <reg64 offset="0xb4c5" name="TPL1_A2D_SRC_TEXTURE_BASE_1" type="address" align="16" variants="A6XX"/>
+ <reg32 offset="0xb4c7" name="TPL1_A2D_SRC_TEXTURE_PITCH_1" low="0" high="11" shr="6" type="uint" variants="A6XX"/>
+ <reg64 offset="0xb4c8" name="TPL1_A2D_SRC_TEXTURE_BASE_2" type="address" align="16" variants="A6XX"/>
- <reg64 offset="0xb2c5" name="SP_PS_2D_SRC_PLANE1" type="address" align="16" variants="A7XX-"/>
- <reg32 offset="0xb2c7" name="SP_PS_2D_SRC_PLANE_PITCH" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
- <reg64 offset="0xb2c8" name="SP_PS_2D_SRC_PLANE2" type="address" align="16" variants="A7XX-"/>
+ <reg64 offset="0xb2c5" name="TPL1_A2D_SRC_TEXTURE_BASE_1" type="address" align="16" variants="A7XX-"/>
+ <reg32 offset="0xb2c7" name="TPL1_A2D_SRC_TEXTURE_PITCH_1" low="0" high="11" shr="6" type="uint" variants="A7XX-"/>
+ <reg64 offset="0xb2c8" name="TPL1_A2D_SRC_TEXTURE_BASE_2" type="address" align="16" variants="A7XX-"/>
- <reg64 offset="0xb4ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb4cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
+ <reg64 offset="0xb4ca" name="TPL1_A2D_SRC_TEXTURE_FLAG_BASE" type="address" align="16" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4cc" name="TPL1_A2D_SRC_TEXTURE_FLAG_PITCH" low="0" high="7" shr="6" type="uint" variants="A6XX" usage="rp_blit"/>
- <reg64 offset="0xb2ca" name="SP_PS_2D_SRC_FLAGS" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2cc" name="SP_PS_2D_SRC_FLAGS_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
+ <reg64 offset="0xb2ca" name="TPL1_A2D_SRC_TEXTURE_FLAG_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2cc" name="TPL1_A2D_SRC_TEXTURE_FLAG_PITCH" low="0" high="7" shr="6" type="uint" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xb4cd" name="SP_PS_UNKNOWN_B4CD" low="6" high="31" variants="A6XX"/>
<reg32 offset="0xb4ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A6XX"/>
@@ -5383,8 +3266,12 @@ to upconvert to 32b float internally?
<reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
<reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
<reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
- <reg32 offset="0xb2d1" name="SP_PS_2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
- <reg32 offset="0xb2d2" name="SP_PS_UNKNOWN_B2D2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d2" name="TPL1_A2D_BLT_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="RAW_COPY" pos="0" type="boolean"/>
+ <bitfield name="START_OFFSET_TEXELS" low="16" high="21"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
<reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
<!-- always 0x100000 or 0x1000000? -->
@@ -5422,34 +3309,44 @@ to upconvert to 32b float internally?
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
- <bitset name="a6xx_hlsq_xs_cntl" inline="yes">
+ <bitset name="a6xx_xs_const_config" inline="yes">
<bitfield name="CONSTLEN" low="0" high="7" shr="2" type="uint"/>
<bitfield name="ENABLED" pos="8" type="boolean"/>
<bitfield name="READ_IMM_SHARED_CONSTS" pos="9" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0xb800" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb801" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb802" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb803" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb800" name="SP_VS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb801" name="SP_HS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb802" name="SP_DS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb803" name="SP_GS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xa827" name="HLSQ_VS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa83f" name="HLSQ_HS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa867" name="HLSQ_DS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa898" name="HLSQ_GS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa827" name="SP_VS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa83f" name="SP_HS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa867" name="SP_DS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa898" name="SP_GS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9aa" name="HLSQ_FS_UNKNOWN_A9AA" variants="A7XX-" usage="rp_blit">
- <!-- Tentatively named, appears to disable consts being loaded via CP_LOAD_STATE6_FRAG -->
- <bitfield name="CONSTS_LOAD_DISABLE" pos="0" type="boolean"/>
+ <reg32 offset="0xa9aa" name="SP_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="FS_DISABLE" pos="0" type="boolean"/>
</reg32>
- <!-- Always 0 -->
- <reg32 offset="0xa9ac" name="HLSQ_UNKNOWN_A9AC" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ac" name="SP_DITHER_CNTL" variants="A7XX-" usage="cmd">
+ <bitfield name="DITHER_MODE_MRT0" low="0" high="1" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT1" low="2" high="3" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT2" low="4" high="5" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT3" low="6" high="7" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT4" low="8" high="9" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT5" low="10" high="11" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT6" low="12" high="13" type="adreno_rb_dither_mode"/>
+ <bitfield name="DITHER_MODE_MRT7" low="14" high="15" type="adreno_rb_dither_mode"/>
+ </reg32>
- <!-- Used in VK_KHR_fragment_shading_rate -->
- <reg32 offset="0xa9ad" name="HLSQ_UNKNOWN_A9AD" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ad" name="SP_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
+ <bitfield name="ATTACHMENT_FSR_ENABLE" pos="1" type="boolean"/>
+ <bitfield name="PRIMITIVE_FSR_ENABLE" pos="3" type="boolean"/>
+ </reg32>
- <reg32 offset="0xa9ae" name="HLSQ_UNKNOWN_A9AE" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9ae" name="SP_PS_CNTL_1" variants="A7XX-" usage="rp_blit">
<bitfield name="SYSVAL_REGS_COUNT" low="0" high="7" type="uint"/>
<!-- UNK8 is set on a730/a740 -->
<bitfield name="UNK8" pos="8" type="boolean"/>
@@ -5462,94 +3359,94 @@ to upconvert to 32b float internally?
<reg32 offset="0xb823" name="HLSQ_LOAD_STATE_GEOM_DATA"/>
- <bitset name="a6xx_hlsq_fs_cntl_0" inline="yes">
+ <bitset name="a6xx_sp_ps_wave_cntl" inline="yes">
<!-- must match SP_FS_CTRL -->
<bitfield name="THREADSIZE" pos="0" type="a6xx_threadsize"/>
<bitfield name="VARYINGS" pos="1" type="boolean"/>
<bitfield name="UNK2" low="2" high="11"/>
</bitset>
- <bitset name="a6xx_hlsq_control_3_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_1" inline="yes">
<!-- register loaded with position (bary.f) -->
<bitfield name="IJ_PERSP_PIXEL" low="0" high="7" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_PIXEL" low="8" high="15" type="a3xx_regid"/>
<bitfield name="IJ_PERSP_CENTROID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_CENTROID" low="24" high="31" type="a3xx_regid"/>
</bitset>
- <bitset name="a6xx_hlsq_control_4_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_2" inline="yes">
<bitfield name="IJ_PERSP_SAMPLE" low="0" high="7" type="a3xx_regid"/>
<bitfield name="IJ_LINEAR_SAMPLE" low="8" high="15" type="a3xx_regid"/>
<bitfield name="XYCOORDREGID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="ZWCOORDREGID" low="24" high="31" type="a3xx_regid"/>
</bitset>
- <bitset name="a6xx_hlsq_control_5_reg" inline="yes">
+ <bitset name="a6xx_sp_reg_prog_id_3" inline="yes">
<bitfield name="LINELENGTHREGID" low="0" high="7" type="a3xx_regid"/>
<bitfield name="FOVEATIONQUALITYREGID" low="8" high="15" type="a3xx_regid"/>
</bitset>
- <reg32 offset="0xb980" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb980" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb981" name="HLSQ_UNKNOWN_B981" pos="0" type="boolean" variants="A6XX"/> <!-- never used by blob -->
- <reg32 offset="0xb982" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb982" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A6XX" usage="rp_blit">
<!-- Sets the maximum number of primitives allowed in one FS wave minus one, similarly to the
A3xx field, except that it's not necessary to set it to anything but the maximum, since
the hardware will simply emit smaller waves when it runs out of space. -->
<bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
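
Given the note that only the maximum ever makes sense here, a driver would simply program the all-ones value of the 3-bit field; a hypothetical helper just to record that:

    #include <stdint.h>

    static uint32_t sp_lb_param_limit_default(void)
    {
        return 0x7; /* PRIMALLOCTHRESHOLD, bits 0..2, at its maximum */
    }
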
- <reg32 offset="0xb983" name="HLSQ_CONTROL_2_REG" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb983" name="SP_REG_PROG_ID_0" variants="A6XX" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
<!-- SAMPLEID is loaded into a half-precision register: -->
<bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
<bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xb984" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb985" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb986" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb987" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="cmd"/>
- <reg32 offset="0xa9c6" type="a6xx_hlsq_fs_cntl_0" name="HLSQ_FS_CNTL_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9c7" name="HLSQ_CONTROL_1_REG" low="0" high="2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xb984" type="a6xx_sp_reg_prog_id_1" name="SP_REG_PROG_ID_1" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb985" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb986" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb987" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="cmd"/>
+ <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9c7" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A7XX-" usage="rp_blit">
<bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
- <reg32 offset="0xa9c8" name="HLSQ_CONTROL_2_REG" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9c8" name="SP_REG_PROG_ID_0" variants="A7XX-" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
<!-- SAMPLEID is loaded into a half-precision register: -->
<bitfield name="SAMPLEID" low="8" high="15" type="a3xx_regid"/>
<bitfield name="SAMPLEMASK" low="16" high="23" type="a3xx_regid"/>
<bitfield name="CENTERRHW" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xa9c9" type="a6xx_hlsq_control_3_reg" name="HLSQ_CONTROL_3_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9ca" type="a6xx_hlsq_control_4_reg" name="HLSQ_CONTROL_4_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9cb" type="a6xx_hlsq_control_5_reg" name="HLSQ_CONTROL_5_REG" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9cd" name="HLSQ_CS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9c9" type="a6xx_sp_reg_prog_id_1" name="SP_REG_PROG_ID_1" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9ca" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cb" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9cd" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="cmd"/>
<!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
- <reg32 offset="0xb990" name="HLSQ_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb990" name="SP_CS_NDRANGE_0" variants="A6XX" usage="rp_blit">
<bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb991" name="HLSQ_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb991" name="SP_CS_NDRANGE_1" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb992" name="HLSQ_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb992" name="SP_CS_NDRANGE_2" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb993" name="HLSQ_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb993" name="SP_CS_NDRANGE_3" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb994" name="HLSQ_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb994" name="SP_CS_NDRANGE_4" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb995" name="HLSQ_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb995" name="SP_CS_NDRANGE_5" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xb996" name="HLSQ_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb996" name="SP_CS_NDRANGE_6" variants="A6XX" usage="rp_blit">
<bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
</reg32>
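
A packing sketch for SP_CS_NDRANGE_0 following the bitfields above (local sizes are programmed as value minus one; what KERNELDIM does is still a TODO, so it is passed through untouched). Illustrative only:

    #include <stdint.h>

    static uint32_t pack_cs_ndrange_0(uint32_t kerneldim, uint32_t lx,
                                      uint32_t ly, uint32_t lz)
    {
        return (kerneldim & 0x3) |
               (((lx - 1) & 0x3ff) << 2) |  /* LOCALSIZEX, bits 2..11  */
               (((ly - 1) & 0x3ff) << 12) | /* LOCALSIZEY, bits 12..21 */
               (((lz - 1) & 0x3ff) << 22);  /* LOCALSIZEZ, bits 22..31 */
    }

    /* e.g. an 8x8x1 workgroup: pack_cs_ndrange_0(3, 8, 8, 1) */
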
- <reg32 offset="0xb997" name="HLSQ_CS_CNTL_0" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb997" name="SP_CS_CONST_CONFIG_0" variants="A6XX" usage="rp_blit">
<!-- these are all vec3. first 3 need to be high regs
- WGSIZECONSTID is the local size (from HLSQ_CS_NDRANGE_0)
+ WGSIZECONSTID is the local size (from SP_CS_NDRANGE_0)
WGOFFSETCONSTID is WGIDCONSTID*WGSIZECONSTID
-->
<bitfield name="WGIDCONSTID" low="0" high="7" type="a3xx_regid"/>
@@ -5557,7 +3454,7 @@ to upconvert to 32b float internally?
<bitfield name="WGOFFSETCONSTID" low="16" high="23" type="a3xx_regid"/>
<bitfield name="LOCALIDREGID" low="24" high="31" type="a3xx_regid"/>
</reg32>
- <reg32 offset="0xb998" name="HLSQ_CS_CNTL_1" variants="A6XX" usage="rp_blit">
+ <reg32 offset="0xb998" name="SP_CS_WGE_CNTL" variants="A6XX" usage="rp_blit">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- a650 has 6 "SP cores" (but 3 "SP"). this makes it use only
@@ -5569,40 +3466,40 @@ to upconvert to 32b float internally?
<bitfield name="THREADSIZE_SCALAR" pos="10" type="boolean"/>
</reg32>
<!--note: vulkan blob doesn't use these -->
- <reg32 offset="0xb999" name="HLSQ_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb99a" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xb99b" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb999" name="SP_CS_KERNEL_GROUP_X" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99a" name="SP_CS_KERNEL_GROUP_Y" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb99b" name="SP_CS_KERNEL_GROUP_Z" variants="A6XX" usage="rp_blit"/>
<!-- TODO: what does KERNELDIM do exactly (blob sets it differently from turnip) -->
- <reg32 offset="0xa9d4" name="HLSQ_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d4" name="SP_CS_NDRANGE_0" variants="A7XX-" usage="rp_blit">
<bitfield name="KERNELDIM" low="0" high="1" type="uint"/>
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d5" name="HLSQ_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d5" name="SP_CS_NDRANGE_1" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d6" name="HLSQ_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d6" name="SP_CS_NDRANGE_2" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_X" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d7" name="HLSQ_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d7" name="SP_CS_NDRANGE_3" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d8" name="HLSQ_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d8" name="SP_CS_NDRANGE_4" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_Y" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9d9" name="HLSQ_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9d9" name="SP_CS_NDRANGE_5" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALSIZE_Z" low="0" high="31" type="uint"/>
</reg32>
- <reg32 offset="0xa9da" name="HLSQ_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9da" name="SP_CS_NDRANGE_6" variants="A7XX-" usage="rp_blit">
<bitfield name="GLOBALOFF_Z" low="0" high="31" type="uint"/>
</reg32>
<!--note: vulkan blob doesn't use these -->
- <reg32 offset="0xa9dc" name="HLSQ_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9dd" name="HLSQ_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xa9de" name="HLSQ_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dc" name="SP_CS_KERNEL_GROUP_X" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9dd" name="SP_CS_KERNEL_GROUP_Y" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9de" name="SP_CS_KERNEL_GROUP_Z" variants="A7XX-" usage="rp_blit"/>
<enum name="a7xx_cs_yalign">
<value name="CS_YALIGN_1" value="8"/>
@@ -5611,19 +3508,29 @@ to upconvert to 32b float internally?
<value name="CS_YALIGN_8" value="1"/>
</enum>
- <reg32 offset="0xa9db" name="HLSQ_CS_CNTL_1" variants="A7XX-" usage="rp_blit">
+ <reg32 offset="0xa9db" name="SP_CS_WGE_CNTL" variants="A7XX-" usage="rp_blit">
<!-- gl_LocalInvocationIndex -->
<bitfield name="LINEARLOCALIDREGID" low="0" high="7" type="a3xx_regid"/>
<!-- Must match SP_CS_CTRL -->
<bitfield name="THREADSIZE" pos="9" type="a6xx_threadsize"/>
- <bitfield name="UNK11" pos="11" type="boolean"/>
- <bitfield name="UNK22" pos="22" type="boolean"/>
- <bitfield name="UNK26" pos="26" type="boolean"/>
- <bitfield name="YALIGN" low="27" high="30" type="a7xx_cs_yalign"/>
+ <doc>
+ When this bit is enabled, the dispatch order interleaves
+ the z coordinate instead of launching all workgroups
+ with z=0, then all with z=1 and so on.
+ </doc>
+ <bitfield name="WORKGROUPRASTORDERZFIRSTEN" pos="11" type="boolean"/>
+ <doc>
+            When both fields are non-zero, the dispatcher uses
+            these tile sizes to launch workgroups in a tiled manner
+            when the x and y workgroup counts are both greater than 1.
+ </doc>
+ <bitfield name="WGTILEWIDTH" low="20" high="25"/>
+ <bitfield name="WGTILEHEIGHT" low="26" high="31"/>
</reg32>
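For illustration, a hedged C sketch of packing SP_CS_WGE_CNTL from the fields
above; the helper name and parameter set are invented for the example, and per
the doc a tile width/height of 0 leaves tiled launch disabled:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t pack_cs_wge_cntl(uint32_t linear_local_id_regid,
                                     uint32_t threadsize, bool z_first,
                                     uint32_t wg_tile_w, uint32_t wg_tile_h)
    {
        return (linear_local_id_regid & 0xff) |
               ((threadsize & 0x1) << 9) |
               ((uint32_t)z_first << 11) |   /* WORKGROUPRASTORDERZFIRSTEN */
               ((wg_tile_w & 0x3f) << 20) |  /* WGTILEWIDTH */
               ((wg_tile_h & 0x3f) << 26);   /* WGTILEHEIGHT */
    }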
- <reg32 offset="0xa9df" name="HLSQ_CS_LOCAL_SIZE" variants="A7XX-" usage="cmd">
- <!-- localsize is value minus one: -->
+ <reg32 offset="0xa9df" name="SP_CS_NDRANGE_7" variants="A7XX-" usage="cmd">
+ <!-- The size of the last workgroup. localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
<bitfield name="LOCALSIZEY" low="12" high="21" type="uint"/>
<bitfield name="LOCALSIZEZ" low="22" high="31" type="uint"/>
@@ -5641,29 +3548,27 @@ to upconvert to 32b float internally?
</reg64>
</array>
- <!-- new in a6xx gen4, mirror of SP_CS_UNKNOWN_A9B1? -->
- <reg32 offset="0xb9d0" name="HLSQ_CS_UNKNOWN_B9D0" variants="A6XX" usage="cmd">
+ <!-- new in a6xx gen4, mirror of SP_CS_CNTL_1? -->
+ <reg32 offset="0xb9d0" name="HLSQ_CS_CTRL_REG1" variants="A6XX" usage="cmd">
<bitfield name="SHARED_SIZE" low="0" high="4" type="uint"/>
- <bitfield name="UNK5" pos="5" type="boolean"/>
- <!-- always 1 ? -->
- <bitfield name="UNK6" pos="6" type="boolean"/>
+ <bitfield name="CONSTANTRAMMODE" low="5" high="6" type="a6xx_const_ram_mode"/>
</reg32>
- <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD" variants="A6XX">
+ <reg32 offset="0xbb00" name="SP_DRAW_INITIATOR" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD" variants="A6XX">
+ <reg32 offset="0xbb01" name="SP_KERNEL_INITIATOR" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD" variants="A6XX">
+ <reg32 offset="0xbb02" name="SP_EVENT_INITIATOR" variants="A6XX">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xbb08" name="HLSQ_INVALIDATE_CMD" variants="A6XX" usage="cmd">
+ <reg32 offset="0xbb08" name="SP_UPDATE_CNTL" variants="A6XX" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -5678,8 +3583,8 @@ to upconvert to 32b float internally?
<bitfield name="FS_STATE" pos="4" type="boolean"/>
<bitfield name="CS_STATE" pos="5" type="boolean"/>
- <bitfield name="CS_IBO" pos="6" type="boolean"/>
- <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+ <bitfield name="CS_UAV" pos="6" type="boolean"/>
+ <bitfield name="GFX_UAV" pos="7" type="boolean"/>
<!-- Note: these only do something when HLSQ_SHARED_CONSTS is set to 1 -->
<bitfield name="CS_SHARED_CONST" pos="19" type="boolean"/>
@@ -5690,20 +3595,20 @@ to upconvert to 32b float internally?
<bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
</reg32>
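To make the bit layout concrete, a small sketch composing an invalidate mask
from the fields above; the macro names are invented for the example, only the
bit positions come from the XML:

    #include <stdint.h>

    #define INVALIDATE_FS_STATE  (1u << 4)
    #define INVALIDATE_CS_STATE  (1u << 5)
    #define INVALIDATE_CS_UAV    (1u << 6)
    #define INVALIDATE_GFX_UAV   (1u << 7)

    /* e.g. drop queued compute state and UAV loads before reprogramming: */
    static const uint32_t cs_invalidate = INVALIDATE_CS_STATE |
                                          INVALIDATE_CS_UAV;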
- <reg32 offset="0xab1c" name="HLSQ_DRAW_CMD" variants="A7XX-">
+ <reg32 offset="0xab1c" name="SP_DRAW_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xab1d" name="HLSQ_DISPATCH_CMD" variants="A7XX-">
+ <reg32 offset="0xab1d" name="SP_KERNEL_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xab1e" name="HLSQ_EVENT_CMD" variants="A7XX-">
+ <reg32 offset="0xab1e" name="SP_EVENT_INITIATOR" variants="A7XX-">
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX-" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -5718,18 +3623,18 @@ to upconvert to 32b float internally?
<bitfield name="FS_STATE" pos="4" type="boolean"/>
<bitfield name="CS_STATE" pos="5" type="boolean"/>
- <bitfield name="CS_IBO" pos="6" type="boolean"/>
- <bitfield name="GFX_IBO" pos="7" type="boolean"/>
+ <bitfield name="CS_UAV" pos="6" type="boolean"/>
+ <bitfield name="GFX_UAV" pos="7" type="boolean"/>
<!-- SS6_BINDLESS: one bit per bindless base -->
<bitfield name="CS_BINDLESS" low="9" high="16" type="hex"/>
<bitfield name="GFX_BINDLESS" low="17" high="24" type="hex"/>
</reg32>
- <reg32 offset="0xbb10" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0xab03" name="HLSQ_FS_CNTL" type="a6xx_hlsq_xs_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <array offset="0xab40" name="HLSQ_SHARED_CONSTS_IMM" stride="1" length="64" variants="A7XX-"/>
+ <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX_0" stride="1" length="64" variants="A7XX-"/>
<reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
<doc>
@@ -5738,7 +3643,7 @@ to upconvert to 32b float internally?
const pool and 16 in the geometry const pool although
only 8 are actually used (why?) and they are mapped to
c504-c511 in each stage. Both VS and FS shared consts
- are written using ST6_CONSTANTS/SB6_IBO, so that both
+ are written using ST6_CONSTANTS/SB6_UAV, so that both
the geometry and FS shared consts can be written at once
by using CP_LOAD_STATE6 rather than
CP_LOAD_STATE6_FRAG/CP_LOAD_STATE6_GEOM. In addition
@@ -5747,13 +3652,13 @@ to upconvert to 32b float internally?
There is also a separate shared constant pool for CS,
which is loaded through CP_LOAD_STATE6_FRAG with
- ST6_UBO/ST6_IBO. However the only real difference for CS
+ ST6_UBO/ST6_UAV. However the only real difference for CS
is the dword units.
</doc>
<bitfield name="ENABLE" pos="0" type="boolean"/>
</reg32>
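A sketch of the CP_LOAD_STATE6 header dword that such a shared-const upload
would use, assuming the usual CP_LOAD_STATE6_0 field layout (DST_OFF,
STATE_TYPE, STATE_SRC, STATE_BLOCK, NUM_UNIT); the enum values below are
placeholders for the example, not the real pm4 encodings:

    #include <stdint.h>

    enum { ST6_CONSTANTS_SKETCH = 0 };   /* placeholder for ST6_CONSTANTS */
    enum { SB6_UAV_SKETCH = 6 };         /* placeholder for SB6_UAV */

    static uint32_t cp_load_state6_0(uint32_t dst_off, uint32_t state_type,
                                     uint32_t state_src, uint32_t state_block,
                                     uint32_t num_unit)
    {
        return (dst_off & 0x3fff) |
               ((state_type & 0x3) << 14) |
               ((state_src & 0x3) << 16) |
               ((state_block & 0xf) << 18) |
               ((num_unit & 0x3ff) << 22);
    }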
- <!-- mirror of SP_BINDLESS_BASE -->
+ <!-- mirror of SP_GFX_BINDLESS_BASE -->
<array offset="0xbb20" name="HLSQ_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="cmd">
<reg64 offset="0" name="DESCRIPTOR">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
@@ -5788,10 +3693,10 @@ to upconvert to 32b float internally?
sequence. The sequence used internally for an event looks like:
- write EVENT_CMD pipe register
- write CP_EVENT_START
- - write HLSQ_EVENT_CMD with event or HLSQ_DRAW_CMD
- - write PC_EVENT_CMD with event or PC_DRAW_CMD
- - write HLSQ_EVENT_CMD(CONTEXT_DONE)
- - write PC_EVENT_CMD(CONTEXT_DONE)
+ - write SP_EVENT_INITIATOR with event or SP_DRAW_INITIATOR
+ - write PC_EVENT_INITIATOR with event or PC_DRAW_INITIATOR
+ - write SP_EVENT_INITIATOR(CONTEXT_DONE)
+ - write PC_EVENT_INITIATOR(CONTEXT_DONE)
- write CP_EVENT_END
Writing to CP_EVENT_END seems to actually trigger the context roll
-->
@@ -5809,193 +3714,6 @@ to upconvert to 32b float internally?
</reg32>
</domain>
-<!-- Seems basically the same as a5xx, maybe move to common.xml.. -->
-<domain name="A6XX_TEX_SAMP" width="32">
- <doc>Texture sampler dwords</doc>
- <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_NEAREST" value="0"/>
- <value name="A6XX_TEX_LINEAR" value="1"/>
- <value name="A6XX_TEX_ANISO" value="2"/>
- <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
- </enum>
- <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_REPEAT" value="0"/>
- <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
- <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
- <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
- <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
- </enum>
- <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_ANISO_1" value="0"/>
- <value name="A6XX_TEX_ANISO_2" value="1"/>
- <value name="A6XX_TEX_ANISO_4" value="2"/>
- <value name="A6XX_TEX_ANISO_8" value="3"/>
- <value name="A6XX_TEX_ANISO_16" value="4"/>
- </enum>
- <enum name="a6xx_reduction_mode">
- <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
- <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
- <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
- </enum>
-
- <reg32 offset="0" name="0">
- <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
- <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
- <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
- <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
- <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
- <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
- <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
- <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="CLAMPENABLE" pos="0" type="boolean">
- <doc>
- clamp result to [0, 1] if the format is unorm or
- [-1, 1] if the format is snorm, *after*
- filtering. Has no effect for other formats.
- </doc>
- </bitfield>
- <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
- <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
- <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
- <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
- <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
- <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
- <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
- <bitfield name="BCOLOR" low="7" high="31"/>
- </reg32>
- <reg32 offset="3" name="3"/>
-</domain>
-
-<domain name="A6XX_TEX_CONST" width="32" varset="chip">
- <doc>Texture constant dwords</doc>
- <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_X" value="0"/>
- <value name="A6XX_TEX_Y" value="1"/>
- <value name="A6XX_TEX_Z" value="2"/>
- <value name="A6XX_TEX_W" value="3"/>
- <value name="A6XX_TEX_ZERO" value="4"/>
- <value name="A6XX_TEX_ONE" value="5"/>
- </enum>
- <enum name="a6xx_tex_type"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_1D" value="0"/>
- <value name="A6XX_TEX_2D" value="1"/>
- <value name="A6XX_TEX_CUBE" value="2"/>
- <value name="A6XX_TEX_3D" value="3"/>
- <value name="A6XX_TEX_BUFFER" value="4"/>
- </enum>
- <reg32 offset="0" name="0">
- <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
- <bitfield name="SRGB" pos="2" type="boolean"/>
- <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
- <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
- <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
- <!-- overlaps with MIPLVLS -->
- <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
- <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
- <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
- <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
- <!--
- Why is the swap needed in addition to SWIZ_*? The swap
- is performed before border color replacement, while the
- swizzle is applied after after it.
- -->
- <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="WIDTH" low="0" high="14" type="uint"/>
- <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
- <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
- </reg32>
- <reg32 offset="2" name="2">
- <!--
- These fields overlap PITCH, and are used instead of
- PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
- -->
- <doc> probably for D3D structured UAVs, normally set to 1 </doc>
- <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
- <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
-
- <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
- <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
- <doc>Pitch in bytes (so actually stride)</doc>
- <bitfield name="PITCH" low="7" high="28" type="uint"/>
- <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
- </reg32>
- <reg32 offset="3" name="3">
- <!--
- ARRAY_PITCH is basically LAYERSZ for the first mipmap level, and
- for 3d textures (laid out mipmap level first) MIN_LAYERSZ is the
- layer size at the point that it stops being reduced moving to
- higher (smaller) mipmap levels
- -->
- <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
- <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
- <!--
- by default levels with w < 16 are linear
- TILE_ALL makes all levels have tiling
- seems required when using UBWC, since all levels have UBWC (can possibly be disabled?)
- -->
- <bitfield name="TILE_ALL" pos="27" type="boolean"/>
- <bitfield name="FLAG" pos="28" type="boolean"/>
- </reg32>
- <!-- for 2-3 plane format, BASE is flag buffer address (if enabled)
- the address of the non-flag base buffer is determined automatically,
- and must follow the flag buffer
- -->
- <reg32 offset="4" name="4">
- <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="BASE_HI" low="0" high="16"/>
- <bitfield name="DEPTH" low="17" high="29" type="uint"/>
- </reg32>
- <reg32 offset="6" name="6">
- <!-- overlaps with PLANE_PITCH -->
- <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
- <!-- pitch for plane 2 / plane 3 -->
- <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
- </reg32>
- <!-- 7/8 is plane 2 address for planar formats -->
- <reg32 offset="7" name="7">
- <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
- </reg32>
- <reg32 offset="8" name="8">
- <bitfield name="FLAG_HI" low="0" high="16"/>
- </reg32>
- <!-- 9/10 is plane 3 address for planar formats -->
- <reg32 offset="9" name="9">
- <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
- </reg32>
- <reg32 offset="10" name="10">
- <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
- <!-- log2 size of the first level, required for mipmapping -->
- <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
- <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
- </reg32>
- <reg32 offset="11" name="11"/>
- <reg32 offset="12" name="12"/>
- <reg32 offset="13" name="13"/>
- <reg32 offset="14" name="14"/>
- <reg32 offset="15" name="15"/>
-</domain>
-
-<domain name="A6XX_UBO" width="32">
- <reg32 offset="0" name="0">
- <bitfield name="BASE_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="BASE_HI" low="0" high="16"/>
- <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
- </reg32>
-</domain>
-
<domain name="A6XX_PDC" width="32">
<reg32 offset="0x1140" name="GPU_ENABLE_PDC"/>
<reg32 offset="0x1148" name="GPU_SEQ_START_ADDR"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
new file mode 100644
index 000000000000..307d43dda8a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+
+<domain name="A6XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+ </enum>
+ <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+ </enum>
+ <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+ </enum>
+ <enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+ </enum>
+ <enum name="a6xx_fast_border_color">
+ <!-- R B G A -->
+ <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
+ <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
+ </enum>
+
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="XY_MAG" low="1" high="2" type="a6xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="3" high="4" type="a6xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="5" high="7" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="8" high="10" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="11" high="13" type="a6xx_tex_clamp"/>
+ <bitfield name="ANISO" low="14" high="16" type="a6xx_tex_aniso"/>
+ <bitfield name="LOD_BIAS" low="19" high="31" type="fixed" radix="8"/><!-- no idea how many bits for real -->
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="CLAMPENABLE" pos="0" type="boolean">
+ <doc>
+ clamp result to [0, 1] if the format is unorm or
+ [-1, 1] if the format is snorm, *after*
+ filtering. Has no effect for other formats.
+ </doc>
+ </bitfield>
+ <bitfield name="COMPARE_FUNC" low="1" high="3" type="adreno_compare_func"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="4" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="5" type="boolean"/>
+ <bitfield name="MIPFILTER_LINEAR_FAR" pos="6" type="boolean"/>
+ <bitfield name="MAX_LOD" low="8" high="19" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="20" high="31" type="ufixed" radix="8"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="REDUCTION_MODE" low="0" high="1" type="a6xx_reduction_mode"/>
+ <bitfield name="FASTBORDERCOLOR" low="2" high="3" type="a6xx_fast_border_color"/>
+ <bitfield name="FASTBORDERCOLOREN" pos="4" type="boolean"/>
+ <bitfield name="CHROMA_LINEAR" pos="5" type="boolean"/>
+ <bitfield name="BCOLOR" low="7" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3"/>
+</domain>
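The MIN_LOD/MAX_LOD fields use a ufixed radix-8 encoding (8 fractional bits).
A minimal conversion sketch, with a hypothetical helper name:

    #include <stdint.h>

    static uint32_t a6xx_tex_samp_ufixed8(float lod)
    {
        int32_t v = (int32_t)(lod * 256.0f + 0.5f);   /* 8 fractional bits */
        if (v < 0)
            v = 0;
        if (v > 0xfff)                                /* 12-bit field */
            v = 0xfff;
        return (uint32_t)v;
    }

    /* dword1 |= a6xx_tex_samp_ufixed8(max_lod) << 8;   MAX_LOD, bits [19:8]  */
    /* dword1 |= a6xx_tex_samp_ufixed8(min_lod) << 20;  MIN_LOD, bits [31:20] */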
+
+<domain name="A6XX_TEX_CONST" width="32" varset="chip">
+ <doc>Texture constant dwords</doc>
+ <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
+ <bitfield name="SRGB" pos="2" type="boolean"/>
+ <bitfield name="SWIZ_X" low="4" high="6" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="7" high="9" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="10" high="12" type="a6xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="13" high="15" type="a6xx_tex_swiz"/>
+ <bitfield name="MIPLVLS" low="16" high="19" type="uint"/>
+ <!-- overlaps with MIPLVLS -->
+ <bitfield name="CHROMA_MIDPOINT_X" pos="16" type="boolean"/>
+ <bitfield name="CHROMA_MIDPOINT_Y" pos="18" type="boolean"/>
+ <bitfield name="SAMPLES" low="20" high="21" type="a3xx_msaa_samples"/>
+ <bitfield name="FMT" low="22" high="29" type="a6xx_format"/>
+ <!--
+ Why is the swap needed in addition to SWIZ_*? The swap
+ is performed before border color replacement, while the
+	   swizzle is applied after it.
+ -->
+ <bitfield name="SWAP" low="30" high="31" type="a3xx_color_swap"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <!--
+ These fields overlap PITCH, and are used instead of
+ PITCH/PITCHALIGN when TYPE is A6XX_TEX_BUFFER.
+ -->
+ <doc> probably for D3D structured UAVs, normally set to 1 </doc>
+ <bitfield name="STRUCTSIZETEXELS" low="4" high="15" type="uint"/>
+ <bitfield name="STARTOFFSETTEXELS" low="16" high="21" type="uint"/>
+
+ <!-- minimum pitch (for mipmap levels): log2(pitchalign / 64) -->
+ <bitfield name="PITCHALIGN" low="0" high="3" type="uint"/>
+ <doc>Pitch in bytes (so actually stride)</doc>
+ <bitfield name="PITCH" low="7" high="28" type="uint"/>
+ <bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <!--
+		ARRAY_PITCH is basically LAYERSZ for the first mipmap level; for
+		3d textures (laid out mipmap level first), MIN_LAYERSZ is the
+		layer size at the point where it stops being reduced when moving
+		to higher (smaller) mipmap levels.
+ -->
+ <bitfield name="ARRAY_PITCH" low="0" high="22" shr="12" type="uint"/>
+ <bitfield name="MIN_LAYERSZ" low="23" high="26" shr="12"/>
+ <!--
+ by default levels with w < 16 are linear
+ TILE_ALL makes all levels have tiling
+ seems required when using UBWC, since all levels have UBWC (can possibly be disabled?)
+ -->
+ <bitfield name="TILE_ALL" pos="27" type="boolean"/>
+ <bitfield name="FLAG" pos="28" type="boolean"/>
+ </reg32>
+	<!-- For 2-3 plane formats, BASE is the flag buffer address (if enabled);
+	     the address of the non-flag base buffer is determined automatically
+	     and must follow the flag buffer.
+ -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BASE_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="DEPTH" low="17" high="29" type="uint"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <!-- overlaps with PLANE_PITCH -->
+ <bitfield name="MIN_LOD_CLAMP" low="0" high="11" type="ufixed" radix="8"/>
+ <!-- pitch for plane 2 / plane 3 -->
+ <bitfield name="PLANE_PITCH" low="8" high="31" type="uint"/>
+ </reg32>
+ <!-- 7/8 is plane 2 address for planar formats -->
+ <reg32 offset="7" name="7">
+ <bitfield name="FLAG_LO" low="5" high="31" shr="5"/>
+ </reg32>
+ <reg32 offset="8" name="8">
+ <bitfield name="FLAG_HI" low="0" high="16"/>
+ </reg32>
+ <!-- 9/10 is plane 3 address for planar formats -->
+ <reg32 offset="9" name="9">
+ <bitfield name="FLAG_BUFFER_ARRAY_PITCH" low="0" high="16" shr="4" type="uint"/>
+ </reg32>
+ <reg32 offset="10" name="10">
+ <bitfield name="FLAG_BUFFER_PITCH" low="0" high="6" shr="6" type="uint"/>
+ <!-- log2 size of the first level, required for mipmapping -->
+ <bitfield name="FLAG_BUFFER_LOGW" low="8" high="11" type="uint"/>
+ <bitfield name="FLAG_BUFFER_LOGH" low="12" high="15" type="uint"/>
+ </reg32>
+ <reg32 offset="11" name="11"/>
+ <reg32 offset="12" name="12"/>
+ <reg32 offset="13" name="13"/>
+ <reg32 offset="14" name="14"/>
+ <reg32 offset="15" name="15"/>
+</domain>
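A note on the shr-encoded fields above, with a sketch: a bitfield with
shr="N" stores value >> N, so BASE_LO (low=5, shr=5) holds a 32-byte-aligned
address directly in the dword, and ARRAY_PITCH (shr=12) is expressed in 4 KiB
units. Helper names are hypothetical:

    #include <stdint.h>

    static uint32_t tex_const_dword4(uint64_t iova)
    {
        /* BASE_LO: iova must be 32-byte aligned; bits [4:0] are implied 0 */
        return (uint32_t)iova & ~0x1fu;
    }

    static uint32_t tex_const_array_pitch(uint32_t layer_size_bytes)
    {
        return (layer_size_bytes >> 12) & 0x7fffff;  /* 23 bits, 4 KiB units */
    }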
+
+<domain name="A6XX_UBO" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="SIZE" low="17" high="31"/> <!-- size in vec4 (4xDWORD) units -->
+ </reg32>
+</domain>
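A minimal sketch of filling this two-dword descriptor from a GPU address and a
byte size (SIZE is in vec4, i.e. 16-byte, units per the comment above);
pack_a6xx_ubo() is an illustrative helper, not a driver function:

    #include <stdint.h>

    static void pack_a6xx_ubo(uint32_t desc[2], uint64_t iova,
                              uint32_t size_bytes)
    {
        uint32_t size_vec4 = (size_bytes + 15) / 16;  /* round up to vec4 */
        desc[0] = (uint32_t)iova;                     /* BASE_LO */
        desc[1] = ((uint32_t)(iova >> 32) & 0x1ffff)  /* BASE_HI: 17 bits */
                | ((size_vec4 & 0x7fff) << 17);       /* SIZE: 15 bits */
    }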
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
new file mode 100644
index 000000000000..665539b098c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
@@ -0,0 +1,383 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a6xx_tile_mode">
+ <value name="TILE6_LINEAR" value="0"/>
+ <value name="TILE6_2" value="2"/>
+ <value name="TILE6_3" value="3"/>
+</enum>
+
+<enum name="a6xx_format">
+ <value value="0x02" name="FMT6_A8_UNORM"/>
+ <value value="0x03" name="FMT6_8_UNORM"/>
+ <value value="0x04" name="FMT6_8_SNORM"/>
+ <value value="0x05" name="FMT6_8_UINT"/>
+ <value value="0x06" name="FMT6_8_SINT"/>
+
+ <value value="0x08" name="FMT6_4_4_4_4_UNORM"/>
+ <value value="0x0a" name="FMT6_5_5_5_1_UNORM"/>
+ <value value="0x0c" name="FMT6_1_5_5_5_UNORM"/> <!-- read only -->
+ <value value="0x0e" name="FMT6_5_6_5_UNORM"/>
+
+ <value value="0x0f" name="FMT6_8_8_UNORM"/>
+ <value value="0x10" name="FMT6_8_8_SNORM"/>
+ <value value="0x11" name="FMT6_8_8_UINT"/>
+ <value value="0x12" name="FMT6_8_8_SINT"/>
+ <value value="0x13" name="FMT6_L8_A8_UNORM"/>
+
+ <value value="0x15" name="FMT6_16_UNORM"/>
+ <value value="0x16" name="FMT6_16_SNORM"/>
+ <value value="0x17" name="FMT6_16_FLOAT"/>
+ <value value="0x18" name="FMT6_16_UINT"/>
+ <value value="0x19" name="FMT6_16_SINT"/>
+
+ <value value="0x21" name="FMT6_8_8_8_UNORM"/>
+ <value value="0x22" name="FMT6_8_8_8_SNORM"/>
+ <value value="0x23" name="FMT6_8_8_8_UINT"/>
+ <value value="0x24" name="FMT6_8_8_8_SINT"/>
+
+ <value value="0x30" name="FMT6_8_8_8_8_UNORM"/>
+ <value value="0x31" name="FMT6_8_8_8_X8_UNORM"/> <!-- samples 1 for alpha -->
+ <value value="0x32" name="FMT6_8_8_8_8_SNORM"/>
+ <value value="0x33" name="FMT6_8_8_8_8_UINT"/>
+ <value value="0x34" name="FMT6_8_8_8_8_SINT"/>
+
+ <value value="0x35" name="FMT6_9_9_9_E5_FLOAT"/>
+
+ <value value="0x36" name="FMT6_10_10_10_2_UNORM"/>
+ <value value="0x37" name="FMT6_10_10_10_2_UNORM_DEST"/>
+ <value value="0x39" name="FMT6_10_10_10_2_SNORM"/>
+ <value value="0x3a" name="FMT6_10_10_10_2_UINT"/>
+ <value value="0x3b" name="FMT6_10_10_10_2_SINT"/>
+
+ <value value="0x42" name="FMT6_11_11_10_FLOAT"/>
+
+ <value value="0x43" name="FMT6_16_16_UNORM"/>
+ <value value="0x44" name="FMT6_16_16_SNORM"/>
+ <value value="0x45" name="FMT6_16_16_FLOAT"/>
+ <value value="0x46" name="FMT6_16_16_UINT"/>
+ <value value="0x47" name="FMT6_16_16_SINT"/>
+
+ <value value="0x48" name="FMT6_32_UNORM"/>
+ <value value="0x49" name="FMT6_32_SNORM"/>
+ <value value="0x4a" name="FMT6_32_FLOAT"/>
+ <value value="0x4b" name="FMT6_32_UINT"/>
+ <value value="0x4c" name="FMT6_32_SINT"/>
+ <value value="0x4d" name="FMT6_32_FIXED"/>
+
+ <value value="0x58" name="FMT6_16_16_16_UNORM"/>
+ <value value="0x59" name="FMT6_16_16_16_SNORM"/>
+ <value value="0x5a" name="FMT6_16_16_16_FLOAT"/>
+ <value value="0x5b" name="FMT6_16_16_16_UINT"/>
+ <value value="0x5c" name="FMT6_16_16_16_SINT"/>
+
+ <value value="0x60" name="FMT6_16_16_16_16_UNORM"/>
+ <value value="0x61" name="FMT6_16_16_16_16_SNORM"/>
+ <value value="0x62" name="FMT6_16_16_16_16_FLOAT"/>
+ <value value="0x63" name="FMT6_16_16_16_16_UINT"/>
+ <value value="0x64" name="FMT6_16_16_16_16_SINT"/>
+
+ <value value="0x65" name="FMT6_32_32_UNORM"/>
+ <value value="0x66" name="FMT6_32_32_SNORM"/>
+ <value value="0x67" name="FMT6_32_32_FLOAT"/>
+ <value value="0x68" name="FMT6_32_32_UINT"/>
+ <value value="0x69" name="FMT6_32_32_SINT"/>
+ <value value="0x6a" name="FMT6_32_32_FIXED"/>
+
+ <value value="0x70" name="FMT6_32_32_32_UNORM"/>
+ <value value="0x71" name="FMT6_32_32_32_SNORM"/>
+ <value value="0x72" name="FMT6_32_32_32_UINT"/>
+ <value value="0x73" name="FMT6_32_32_32_SINT"/>
+ <value value="0x74" name="FMT6_32_32_32_FLOAT"/>
+ <value value="0x75" name="FMT6_32_32_32_FIXED"/>
+
+ <value value="0x80" name="FMT6_32_32_32_32_UNORM"/>
+ <value value="0x81" name="FMT6_32_32_32_32_SNORM"/>
+ <value value="0x82" name="FMT6_32_32_32_32_FLOAT"/>
+ <value value="0x83" name="FMT6_32_32_32_32_UINT"/>
+ <value value="0x84" name="FMT6_32_32_32_32_SINT"/>
+ <value value="0x85" name="FMT6_32_32_32_32_FIXED"/>
+
+ <value value="0x8c" name="FMT6_G8R8B8R8_422_UNORM"/> <!-- UYVY -->
+ <value value="0x8d" name="FMT6_R8G8R8B8_422_UNORM"/> <!-- YUYV -->
+ <value value="0x8e" name="FMT6_R8_G8B8_2PLANE_420_UNORM"/> <!-- NV12 -->
+ <value value="0x8f" name="FMT6_NV21"/>
+ <value value="0x90" name="FMT6_R8_G8_B8_3PLANE_420_UNORM"/> <!-- YV12 -->
+
+ <value value="0x91" name="FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8"/>
+
+ <!-- Note: tiling/UBWC for these may be different from equivalent formats
+ For example FMT6_NV12_Y is not compatible with FMT6_8_UNORM
+ -->
+ <value value="0x94" name="FMT6_NV12_Y"/>
+ <value value="0x95" name="FMT6_NV12_UV"/>
+ <value value="0x96" name="FMT6_NV12_VU"/>
+ <value value="0x97" name="FMT6_NV12_4R"/>
+ <value value="0x98" name="FMT6_NV12_4R_Y"/>
+ <value value="0x99" name="FMT6_NV12_4R_UV"/>
+ <value value="0x9a" name="FMT6_P010"/>
+ <value value="0x9b" name="FMT6_P010_Y"/>
+ <value value="0x9c" name="FMT6_P010_UV"/>
+ <value value="0x9d" name="FMT6_TP10"/>
+ <value value="0x9e" name="FMT6_TP10_Y"/>
+ <value value="0x9f" name="FMT6_TP10_UV"/>
+
+ <value value="0xa0" name="FMT6_Z24_UNORM_S8_UINT"/>
+
+ <value value="0xab" name="FMT6_ETC2_RG11_UNORM"/>
+ <value value="0xac" name="FMT6_ETC2_RG11_SNORM"/>
+ <value value="0xad" name="FMT6_ETC2_R11_UNORM"/>
+ <value value="0xae" name="FMT6_ETC2_R11_SNORM"/>
+ <value value="0xaf" name="FMT6_ETC1"/>
+ <value value="0xb0" name="FMT6_ETC2_RGB8"/>
+ <value value="0xb1" name="FMT6_ETC2_RGBA8"/>
+ <value value="0xb2" name="FMT6_ETC2_RGB8A1"/>
+ <value value="0xb3" name="FMT6_DXT1"/>
+ <value value="0xb4" name="FMT6_DXT3"/>
+ <value value="0xb5" name="FMT6_DXT5"/>
+ <value value="0xb6" name="FMT6_RGTC1_UNORM"/>
+ <value value="0xb7" name="FMT6_RGTC1_UNORM_FAST"/>
+ <value value="0xb8" name="FMT6_RGTC1_SNORM"/>
+ <value value="0xb9" name="FMT6_RGTC1_SNORM_FAST"/>
+ <value value="0xba" name="FMT6_RGTC2_UNORM"/>
+ <value value="0xbb" name="FMT6_RGTC2_UNORM_FAST"/>
+ <value value="0xbc" name="FMT6_RGTC2_SNORM"/>
+ <value value="0xbd" name="FMT6_RGTC2_SNORM_FAST"/>
+ <value value="0xbe" name="FMT6_BPTC_UFLOAT"/>
+ <value value="0xbf" name="FMT6_BPTC_FLOAT"/>
+ <value value="0xc0" name="FMT6_BPTC"/>
+ <value value="0xc1" name="FMT6_ASTC_4x4"/>
+ <value value="0xc2" name="FMT6_ASTC_5x4"/>
+ <value value="0xc3" name="FMT6_ASTC_5x5"/>
+ <value value="0xc4" name="FMT6_ASTC_6x5"/>
+ <value value="0xc5" name="FMT6_ASTC_6x6"/>
+ <value value="0xc6" name="FMT6_ASTC_8x5"/>
+ <value value="0xc7" name="FMT6_ASTC_8x6"/>
+ <value value="0xc8" name="FMT6_ASTC_8x8"/>
+ <value value="0xc9" name="FMT6_ASTC_10x5"/>
+ <value value="0xca" name="FMT6_ASTC_10x6"/>
+ <value value="0xcb" name="FMT6_ASTC_10x8"/>
+ <value value="0xcc" name="FMT6_ASTC_10x10"/>
+ <value value="0xcd" name="FMT6_ASTC_12x10"/>
+ <value value="0xce" name="FMT6_ASTC_12x12"/>
+
+ <!-- for sampling stencil (integer, 2nd channel), not available on a630 -->
+ <value value="0xea" name="FMT6_Z24_UINT_S8_UINT"/>
+
+ <!-- Not a hw enum, used internally in driver -->
+ <value value="0xff" name="FMT6_NONE"/>
+
+</enum>
+
+<!-- probably same as a5xx -->
+<enum name="a6xx_polygon_mode">
+ <value name="POLYMODE6_POINTS" value="1"/>
+ <value name="POLYMODE6_LINES" value="2"/>
+ <value name="POLYMODE6_TRIANGLES" value="3"/>
+</enum>
+
+<enum name="a6xx_depth_format">
+ <value name="DEPTH6_NONE" value="0"/>
+ <value name="DEPTH6_16" value="1"/>
+ <value name="DEPTH6_24_8" value="2"/>
+ <value name="DEPTH6_32" value="4"/>
+</enum>
+
+<bitset name="a6x_cp_protect" inline="yes">
+ <bitfield name="BASE_ADDR" low="0" high="17"/>
+ <bitfield name="MASK_LEN" low="18" high="30"/>
+ <bitfield name="READ" pos="31" type="boolean"/>
+</bitset>
+
+<enum name="a6xx_shader_id">
+ <value value="0x9" name="A6XX_TP0_TMO_DATA"/>
+ <value value="0xa" name="A6XX_TP0_SMO_DATA"/>
+ <value value="0xb" name="A6XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="0x19" name="A6XX_TP1_TMO_DATA"/>
+ <value value="0x1a" name="A6XX_TP1_SMO_DATA"/>
+ <value value="0x1b" name="A6XX_TP1_MIPMAP_BASE_DATA"/>
+ <value value="0x29" name="A6XX_SP_INST_DATA"/>
+ <value value="0x2a" name="A6XX_SP_LB_0_DATA"/>
+ <value value="0x2b" name="A6XX_SP_LB_1_DATA"/>
+ <value value="0x2c" name="A6XX_SP_LB_2_DATA"/>
+ <value value="0x2d" name="A6XX_SP_LB_3_DATA"/>
+ <value value="0x2e" name="A6XX_SP_LB_4_DATA"/>
+ <value value="0x2f" name="A6XX_SP_LB_5_DATA"/>
+ <value value="0x30" name="A6XX_SP_CB_BINDLESS_DATA"/>
+ <value value="0x31" name="A6XX_SP_CB_LEGACY_DATA"/>
+ <value value="0x32" name="A6XX_SP_GFX_UAV_BASE_DATA"/>
+ <value value="0x33" name="A6XX_SP_INST_TAG"/>
+ <value value="0x34" name="A6XX_SP_CB_BINDLESS_TAG"/>
+ <value value="0x35" name="A6XX_SP_TMO_UMO_TAG"/>
+ <value value="0x36" name="A6XX_SP_SMO_TAG"/>
+ <value value="0x37" name="A6XX_SP_STATE_DATA"/>
+ <value value="0x49" name="A6XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="0x4a" name="A6XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="0x4b" name="A6XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="0x4c" name="A6XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="0x4d" name="A6XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="0x4e" name="A6XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="0x50" name="A6XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="0x51" name="A6XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="0x52" name="A6XX_HLSQ_INST_RAM"/>
+ <value value="0x53" name="A6XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="0x54" name="A6XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="0x55" name="A6XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="0x56" name="A6XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="0x57" name="A6XX_HLSQ_INST_RAM_TAG"/>
+ <value value="0x58" name="A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="0x59" name="A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="0x5a" name="A6XX_HLSQ_PWR_REST_RAM"/>
+ <value value="0x5b" name="A6XX_HLSQ_PWR_REST_TAG"/>
+ <value value="0x60" name="A6XX_HLSQ_DATAPATH_META"/>
+ <value value="0x61" name="A6XX_HLSQ_FRONTEND_META"/>
+ <value value="0x62" name="A6XX_HLSQ_INDIRECT_META"/>
+ <value value="0x63" name="A6XX_HLSQ_BACKEND_META"/>
+ <value value="0x70" name="A6XX_SP_LB_6_DATA"/>
+ <value value="0x71" name="A6XX_SP_LB_7_DATA"/>
+ <value value="0x73" name="A6XX_HLSQ_INST_RAM_1"/>
+</enum>
+
+<enum name="a6xx_debugbus_id">
+ <value value="0x1" name="A6XX_DBGBUS_CP"/>
+ <value value="0x2" name="A6XX_DBGBUS_RBBM"/>
+ <value value="0x3" name="A6XX_DBGBUS_VBIF"/>
+ <value value="0x4" name="A6XX_DBGBUS_HLSQ"/>
+ <value value="0x5" name="A6XX_DBGBUS_UCHE"/>
+ <value value="0x6" name="A6XX_DBGBUS_DPM"/>
+ <value value="0x7" name="A6XX_DBGBUS_TESS"/>
+ <value value="0x8" name="A6XX_DBGBUS_PC"/>
+ <value value="0x9" name="A6XX_DBGBUS_VFDP"/>
+ <value value="0xa" name="A6XX_DBGBUS_VPC"/>
+ <value value="0xb" name="A6XX_DBGBUS_TSE"/>
+ <value value="0xc" name="A6XX_DBGBUS_RAS"/>
+ <value value="0xd" name="A6XX_DBGBUS_VSC"/>
+ <value value="0xe" name="A6XX_DBGBUS_COM"/>
+ <value value="0x10" name="A6XX_DBGBUS_LRZ"/>
+ <value value="0x11" name="A6XX_DBGBUS_A2D"/>
+ <value value="0x12" name="A6XX_DBGBUS_CCUFCHE"/>
+ <value value="0x13" name="A6XX_DBGBUS_GMU_CX"/>
+ <value value="0x14" name="A6XX_DBGBUS_RBP"/>
+ <value value="0x15" name="A6XX_DBGBUS_DCS"/>
+ <value value="0x16" name="A6XX_DBGBUS_DBGC"/>
+ <value value="0x17" name="A6XX_DBGBUS_CX"/>
+ <value value="0x18" name="A6XX_DBGBUS_GMU_GX"/>
+ <value value="0x19" name="A6XX_DBGBUS_TPFCHE"/>
+ <value value="0x1a" name="A6XX_DBGBUS_GBIF_GX"/>
+ <value value="0x1d" name="A6XX_DBGBUS_GPC"/>
+ <value value="0x1e" name="A6XX_DBGBUS_LARC"/>
+ <value value="0x1f" name="A6XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="0x20" name="A6XX_DBGBUS_RB_0"/>
+ <value value="0x21" name="A6XX_DBGBUS_RB_1"/>
+ <value value="0x22" name="A6XX_DBGBUS_RB_2"/>
+ <value value="0x24" name="A6XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="0x28" name="A6XX_DBGBUS_CCU_0"/>
+ <value value="0x29" name="A6XX_DBGBUS_CCU_1"/>
+ <value value="0x2a" name="A6XX_DBGBUS_CCU_2"/>
+ <value value="0x38" name="A6XX_DBGBUS_VFD_0"/>
+ <value value="0x39" name="A6XX_DBGBUS_VFD_1"/>
+ <value value="0x3a" name="A6XX_DBGBUS_VFD_2"/>
+ <value value="0x3b" name="A6XX_DBGBUS_VFD_3"/>
+ <value value="0x3c" name="A6XX_DBGBUS_VFD_4"/>
+ <value value="0x3d" name="A6XX_DBGBUS_VFD_5"/>
+ <value value="0x40" name="A6XX_DBGBUS_SP_0"/>
+ <value value="0x41" name="A6XX_DBGBUS_SP_1"/>
+ <value value="0x42" name="A6XX_DBGBUS_SP_2"/>
+ <value value="0x48" name="A6XX_DBGBUS_TPL1_0"/>
+ <value value="0x49" name="A6XX_DBGBUS_TPL1_1"/>
+ <value value="0x4a" name="A6XX_DBGBUS_TPL1_2"/>
+ <value value="0x4b" name="A6XX_DBGBUS_TPL1_3"/>
+ <value value="0x4c" name="A6XX_DBGBUS_TPL1_4"/>
+ <value value="0x4d" name="A6XX_DBGBUS_TPL1_5"/>
+ <value value="0x58" name="A6XX_DBGBUS_SPTP_0"/>
+ <value value="0x59" name="A6XX_DBGBUS_SPTP_1"/>
+ <value value="0x5a" name="A6XX_DBGBUS_SPTP_2"/>
+ <value value="0x5b" name="A6XX_DBGBUS_SPTP_3"/>
+ <value value="0x5c" name="A6XX_DBGBUS_SPTP_4"/>
+ <value value="0x5d" name="A6XX_DBGBUS_SPTP_5"/>
+</enum>
+
+<!--
+Used in a6xx_a2d_bit_cntl. The value mostly seems to correlate to the
+component type/size, so I think it relates to the internal format used for
+blending? The one exception is that 16b unorm and 32b float use the
+same value... maybe 16b unorm is uncommon enough that it was just easier
+to upconvert to 32b float internally?
+
+ 8b unorm: 10 (sometimes 0, is the high bit part of something else?)
+16b unorm: 4
+
+32b int: 7
+16b int: 6
+ 8b int: 5
+
+32b float: 4
+16b float: 3
+ -->
+<enum name="a6xx_2d_ifmt">
+ <value value="0x10" name="R2D_UNORM8"/>
+ <value value="0x7" name="R2D_INT32"/>
+ <value value="0x6" name="R2D_INT16"/>
+ <value value="0x5" name="R2D_INT8"/>
+ <value value="0x4" name="R2D_FLOAT32"/>
+ <value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x1" name="R2D_UNORM8_SRGB"/>
+ <value value="0x0" name="R2D_RAW"/>
+</enum>
+
+<enum name="a6xx_tex_type">
+ <value name="A6XX_TEX_1D" value="0"/>
+ <value name="A6XX_TEX_2D" value="1"/>
+ <value name="A6XX_TEX_CUBE" value="2"/>
+ <value name="A6XX_TEX_3D" value="3"/>
+ <value name="A6XX_TEX_BUFFER" value="4"/>
+ <doc>
+		A special buffer type for use as the source of buffer-to-image
+		copies, with lower alignment requirements than A6XX_TEX_2D.
+		Available since A7XX.
+ </doc>
+ <value name="A6XX_TEX_IMG_BUFFER" value="5"/>
+</enum>
+
+<enum name="a6xx_ztest_mode">
+ <doc>Allow early z-test and early-lrz (if applicable)</doc>
+ <value value="0x0" name="A6XX_EARLY_Z"/>
+ <doc>Disable early z-test and early-lrz test (if applicable)</doc>
+ <value value="0x1" name="A6XX_LATE_Z"/>
+ <doc>
+ A special mode that allows early-lrz (if applicable) or early-z
+		A special mode that allows early-lrz (if applicable) or early-z
+		tests, but also does late-z tests, at which point it writes depth.
+
+		This mode is used when a fragment can be killed (via discard or
+		sample mask) after the early-z test and the shader writes depth. In
+		that case depth can only be written at the late-z stage, but it is
+		still ok to use early-z to discard fragments.
+
+		However, this mode is not compatible with:
+ - Lack of D/S attachment
+ - Stencil writes on stencil or depth test failures
+ - Per-sample shading
+ </doc>
+ <value value="0x2" name="A6XX_EARLY_Z_LATE_Z"/>
+ <doc>Not a real hw value, used internally by mesa</doc>
+ <value value="0x3" name="A6XX_INVALID_ZTEST"/>
+</enum>
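The doc above amounts to a small decision procedure; here it is restated as a
hedged C sketch, where the predicate parameters are hypothetical
shader/pipeline properties, not real driver state:

    #include <stdbool.h>

    enum ztest_mode_sketch {
        SKETCH_EARLY_Z, SKETCH_LATE_Z, SKETCH_EARLY_Z_LATE_Z
    };

    static enum ztest_mode_sketch
    pick_ztest_mode(bool can_kill_fragment, bool depth_write_enabled,
                    bool has_ds_attachment, bool stencil_write_on_fail,
                    bool per_sample_shading)
    {
        if (can_kill_fragment && depth_write_enabled) {
            /* depth can only be committed at the late-z stage, but early-z
             * may still be used to discard fragments -- unless one of the
             * incompatible cases applies */
            if (has_ds_attachment && !stencil_write_on_fail &&
                !per_sample_shading)
                return SKETCH_EARLY_Z_LATE_Z;
            return SKETCH_LATE_Z;
        }
        return SKETCH_EARLY_Z;
    }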
+
+<enum name="a6xx_tess_spacing">
+ <value value="0x0" name="TESS_EQUAL"/>
+ <value value="0x2" name="TESS_FRACTIONAL_ODD"/>
+ <value value="0x3" name="TESS_FRACTIONAL_EVEN"/>
+</enum>
+<enum name="a6xx_tess_output">
+ <value value="0x0" name="TESS_POINTS"/>
+ <value value="0x1" name="TESS_LINES"/>
+ <value value="0x2" name="TESS_CW_TRIS"/>
+ <value value="0x3" name="TESS_CCW_TRIS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml
new file mode 100644
index 000000000000..c446a2eb1120
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml
@@ -0,0 +1,600 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a6xx_cp_perfcounter_select">
+ <value value="0" name="PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="PERF_CP_SQE_IDLE"/>
+ <value value="16" name="PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="PERF_CP_PM4_DATA"/>
+ <value value="46" name="PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="PERF_CP_SQE_INSTR_COUNTER"/>
+</enum>
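As a usage illustration only: once one of these selects is programmed into a
CP counter, a busy ratio can be derived from PERF_CP_ALWAYS_COUNT and
PERF_CP_BUSY_CYCLES. read_cp_counter() below is a hypothetical accessor (the
real driver samples counters through the RBBM perfcounter registers):

    #include <stdint.h>

    extern uint64_t read_cp_counter(int select);   /* hypothetical accessor */

    static double cp_busy_ratio(void)
    {
        uint64_t total = read_cp_counter(0);   /* PERF_CP_ALWAYS_COUNT */
        uint64_t busy  = read_cp_counter(2);   /* PERF_CP_BUSY_CYCLES */
        return total ? (double)busy / (double)total : 0.0;
    }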
+
+<enum name="a6xx_rbbm_perfcounter_select">
+ <value value="0" name="PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a6xx_pc_perfcounter_select">
+ <value value="0" name="PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="PERF_PC_STALL_CYCLES_TSE"/>
+ <value value="4" name="PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="PERF_PC_STALL_CYCLES_TSE_ONLY"/>
+ <value value="8" name="PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="PERF_PC_STARVE_CYCLES_FOR_POSITION"/>
+ <value value="14" name="PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="15" name="PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="16" name="PERF_PC_INSTANCES"/>
+ <value value="17" name="PERF_PC_VPC_PRIMITIVES"/>
+ <value value="18" name="PERF_PC_DEAD_PRIM"/>
+ <value value="19" name="PERF_PC_LIVE_PRIM"/>
+ <value value="20" name="PERF_PC_VERTEX_HITS"/>
+ <value value="21" name="PERF_PC_IA_VERTICES"/>
+ <value value="22" name="PERF_PC_IA_PRIMITIVES"/>
+ <value value="23" name="PERF_PC_GS_PRIMITIVES"/>
+ <value value="24" name="PERF_PC_HS_INVOCATIONS"/>
+ <value value="25" name="PERF_PC_DS_INVOCATIONS"/>
+ <value value="26" name="PERF_PC_VS_INVOCATIONS"/>
+ <value value="27" name="PERF_PC_GS_INVOCATIONS"/>
+ <value value="28" name="PERF_PC_DS_PRIMITIVES"/>
+ <value value="29" name="PERF_PC_VPC_POS_DATA_TRANSACTION"/>
+ <value value="30" name="PERF_PC_3D_DRAWCALLS"/>
+ <value value="31" name="PERF_PC_2D_DRAWCALLS"/>
+ <value value="32" name="PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="33" name="PERF_TESS_BUSY_CYCLES"/>
+ <value value="34" name="PERF_TESS_WORKING_CYCLES"/>
+ <value value="35" name="PERF_TESS_STALL_CYCLES_PC"/>
+ <value value="36" name="PERF_TESS_STARVE_CYCLES_PC"/>
+ <value value="37" name="PERF_PC_TSE_TRANSACTION"/>
+ <value value="38" name="PERF_PC_TSE_VERTEX"/>
+ <value value="39" name="PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="40" name="PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="41" name="PERF_PC_TESS_FACTOR_TRANS"/>
+</enum>
+
+<enum name="a6xx_vfd_perfcounter_select">
+ <value value="0" name="PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="PERF_VFDP_VS_STAGE_WAVES"/>
+</enum>
+
+<enum name="a6xx_hlsq_perfcounter_select">
+ <value value="0" name="PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="PERF_HLSQ_FS_STAGE_1X_WAVES"/>
+ <value value="7" name="PERF_HLSQ_FS_STAGE_2X_WAVES"/>
+ <value value="8" name="PERF_HLSQ_QUADS"/>
+ <value value="9" name="PERF_HLSQ_CS_INVOCATIONS"/>
+ <value value="10" name="PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="PERF_HLSQ_PIXELS"/>
+ <value value="20" name="PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+</enum>
+
+<enum name="a6xx_vpc_perfcounter_select">
+ <value value="0" name="PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="PERF_VPC_STALL_CYCLES_PC"/>
+ <value value="6" name="PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="PERF_VPC_NUM_ATTR_REQ_LM"/>
+</enum>
+
+<enum name="a6xx_tse_perfcounter_select">
+ <value value="0" name="PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="PERF_TSE_CINVOCATION"/>
+ <value value="16" name="PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a6xx_ras_perfcounter_select">
+ <value value="0" name="PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="PERF_RAS_8X4_TILES"/>
+ <value value="6" name="PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="PERF_RAS_BLOCKS"/>
+</enum>
+
+<enum name="a6xx_uche_perfcounter_select">
+ <value value="0" name="PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="PERF_UCHE_EVICTS"/>
+ <value value="19" name="PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="PERF_UCHE_RAM_WRITE_REQ"/>
+</enum>
+
+<enum name="a6xx_tp_perfcounter_select">
+ <value value="0" name="PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="PERF_TP_FLAG_CACHE_REQUEST_SAMPLES"/>
+ <value value="5" name="PERF_TP_FLAG_CACHE_REQUEST_LATENCY"/>
+ <value value="6" name="PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="PERF_TP_QUADS_1D"/>
+ <value value="19" name="PERF_TP_QUADS_2D"/>
+ <value value="20" name="PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="PERF_TP_QUADS_3D"/>
+ <value value="22" name="PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="PERF_TP_L1_5_L2_COMPRESS_REQS"/>
+ <value value="43" name="PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="PERF_TP_FLAG_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="55" name="PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="56" name="PERF_TP_STARVE_CYCLES_UCHE"/>
+</enum>
+
+<enum name="a6xx_sp_perfcounter_select">
+ <value value="0" name="PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="PERF_SP_FS_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="PERF_SP_FS_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="PERF_SP_GPR_READ"/>
+ <value value="58" name="PERF_SP_GPR_WRITE"/>
+ <value value="59" name="PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="PERF_SP_WORKING_EU"/>
+ <value value="72" name="PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="PERF_SP_EXECUTABLE_WAVES"/>
+</enum>
+
+<enum name="a6xx_rb_perfcounter_select">
+ <value value="0" name="PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="PERF_RB_Z_READ"/>
+ <value value="13" name="PERF_RB_Z_WRITE"/>
+ <value value="14" name="PERF_RB_C_READ"/>
+ <value value="15" name="PERF_RB_C_WRITE"/>
+ <value value="16" name="PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="PERF_RB_Z_PASS"/>
+ <value value="18" name="PERF_RB_Z_FAIL"/>
+ <value value="19" name="PERF_RB_S_FAIL"/>
+ <value value="20" name="PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="PERF_RB_3D_PIXELS"/>
+ <value value="30" name="PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="PERF_RB_EARLY_Z_SKIP_GRANT"/>
+</enum>
+
+<enum name="a6xx_vsc_perfcounter_select">
+ <value value="0" name="PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="PERF_VSC_EOT_NUM"/>
+ <value value="4" name="PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a6xx_ccu_perfcounter_select">
+ <value value="0" name="PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="PERF_CCU_STARVE_CYCLES_FLAG_RETURN"/>
+ <value value="4" name="PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="5" name="PERF_CCU_COLOR_BLOCKS"/>
+ <value value="6" name="PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="7" name="PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="8" name="PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="9" name="PERF_CCU_GMEM_READ"/>
+ <value value="10" name="PERF_CCU_GMEM_WRITE"/>
+ <value value="11" name="PERF_CCU_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="12" name="PERF_CCU_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="13" name="PERF_CCU_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="14" name="PERF_CCU_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="15" name="PERF_CCU_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="16" name="PERF_CCU_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="17" name="PERF_CCU_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="18" name="PERF_CCU_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="19" name="PERF_CCU_COLOR_READ_FLAG0_COUNT"/>
+ <value value="20" name="PERF_CCU_COLOR_READ_FLAG1_COUNT"/>
+ <value value="21" name="PERF_CCU_COLOR_READ_FLAG2_COUNT"/>
+ <value value="22" name="PERF_CCU_COLOR_READ_FLAG3_COUNT"/>
+ <value value="23" name="PERF_CCU_COLOR_READ_FLAG4_COUNT"/>
+ <value value="24" name="PERF_CCU_COLOR_READ_FLAG5_COUNT"/>
+ <value value="25" name="PERF_CCU_COLOR_READ_FLAG6_COUNT"/>
+ <value value="26" name="PERF_CCU_COLOR_READ_FLAG8_COUNT"/>
+ <value value="27" name="PERF_CCU_2D_RD_REQ"/>
+ <value value="28" name="PERF_CCU_2D_WR_REQ"/>
+</enum>
+
+<enum name="a6xx_lrz_perfcounter_select">
+ <value value="0" name="PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="PERF_LRZ_FULLY_COVERED_TILES"/>
+ <value value="20" name="PERF_LRZ_PARTIAL_COVERED_TILES"/>
+ <value value="21" name="PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="22" name="PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="23" name="PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="24" name="PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="25" name="PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="26" name="PERF_LRZ_STALL_CYCLES_VC"/>
+ <value value="27" name="PERF_LRZ_RAS_MASK_TRANS"/>
+</enum>
+
+<enum name="a6xx_cmp_perfcounter_select">
+ <value value="0" name="PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="PERF_CMPDECMP_FLAG_FETCH_CYCLES"/>
+ <value value="10" name="PERF_CMPDECMP_FLAG_FETCH_SAMPLES"/>
+ <value value="11" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="12" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="13" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="14" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="15" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="16" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="17" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="18" name="PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="19" name="PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="20" name="PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="21" name="PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="22" name="PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="23" name="PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="24" name="PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="25" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ"/>
+ <value value="26" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR"/>
+ <value value="27" name="PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN"/>
+ <value value="28" name="PERF_CMPDECMP_2D_RD_DATA"/>
+ <value value="29" name="PERF_CMPDECMP_2D_WR_DATA"/>
+ <value value="30" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="31" name="PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="32" name="PERF_CMPDECMP_2D_OUTPUT_TRANS"/>
+ <value value="33" name="PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="34" name="PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="35" name="PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="36" name="PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="37" name="PERF_CMPDECMP_2D_BUSY_CYCLES"/>
+ <value value="38" name="PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES"/>
+ <value value="39" name="PERF_CMPDECMP_2D_PIXELS"/>
+</enum>
+
+</database>
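
These XML databases are the machine-readable register descriptions that the msm driver's headergen tooling turns into C headers, so each <enum> above ends up as an ordinary C enum. Conceptually, every counted block (CP, UCHE, TP, SP, RB, ...) exposes a set of select/counter register pairs, and writing one of the event indices listed above into a block's select register routes that event onto the corresponding 64-bit counter. The sketch below illustrates only that pairing scheme; the register offsets and the gpu_write() helper are placeholders for illustration, not the real a6xx register map.

    /*
     * Minimal userspace-style sketch: each block has a small array of
     * (select, counter) register pairs; writing an event index from the
     * enums above into the select register routes that event to the
     * 64-bit counter read back via the lo/hi pair.
     *
     * The offsets below are made-up placeholders, not the a6xx map.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PERF_UCHE_BUSY_CYCLES 0 /* from a6xx_uche_perfcounter_select */

    struct perfcntr {
            uint32_t sel_reg; /* event select register (placeholder) */
            uint32_t lo_reg;  /* counter value, low 32 bits (placeholder) */
            uint32_t hi_reg;  /* counter value, high 32 bits (placeholder) */
    };

    static const struct perfcntr uche_cntrs[] = {
            { 0x0e1c, 0x0a30, 0x0a31 }, /* placeholder offsets */
            { 0x0e1d, 0x0a32, 0x0a33 },
    };

    /* Stand-in for an MMIO write; a real tool would mmap the GPU regs. */
    static void gpu_write(uint32_t reg, uint32_t val)
    {
            printf("write reg 0x%04" PRIx32 " <- 0x%08" PRIx32 "\n",
                   reg, val);
    }

    int main(void)
    {
            /* Route "UCHE busy cycles" onto counter 0 of the UCHE block. */
            gpu_write(uche_cntrs[0].sel_reg, PERF_UCHE_BUSY_CYCLES);
            return 0;
    }

A real consumer would also snapshot the lo/hi counter pair before and after the workload and subtract, since the counters are free-running.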
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
new file mode 100644
index 000000000000..661b0dd0f675
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a7xx_statetype_id">
+ <value value="0" name="A7XX_TP0_NCTX_REG"/>
+ <value value="1" name="A7XX_TP0_CTX0_3D_CVS_REG"/>
+ <value value="2" name="A7XX_TP0_CTX0_3D_CPS_REG"/>
+ <value value="3" name="A7XX_TP0_CTX1_3D_CVS_REG"/>
+ <value value="4" name="A7XX_TP0_CTX1_3D_CPS_REG"/>
+ <value value="5" name="A7XX_TP0_CTX2_3D_CPS_REG"/>
+ <value value="6" name="A7XX_TP0_CTX3_3D_CPS_REG"/>
+ <value value="9" name="A7XX_TP0_TMO_DATA"/>
+ <value value="10" name="A7XX_TP0_SMO_DATA"/>
+ <value value="11" name="A7XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="32" name="A7XX_SP_NCTX_REG"/>
+ <value value="33" name="A7XX_SP_CTX0_3D_CVS_REG"/>
+ <value value="34" name="A7XX_SP_CTX0_3D_CPS_REG"/>
+ <value value="35" name="A7XX_SP_CTX1_3D_CVS_REG"/>
+ <value value="36" name="A7XX_SP_CTX1_3D_CPS_REG"/>
+ <value value="37" name="A7XX_SP_CTX2_3D_CPS_REG"/>
+ <value value="38" name="A7XX_SP_CTX3_3D_CPS_REG"/>
+ <value value="39" name="A7XX_SP_INST_DATA"/>
+ <value value="40" name="A7XX_SP_INST_DATA_1"/>
+ <value value="41" name="A7XX_SP_LB_0_DATA"/>
+ <value value="42" name="A7XX_SP_LB_1_DATA"/>
+ <value value="43" name="A7XX_SP_LB_2_DATA"/>
+ <value value="44" name="A7XX_SP_LB_3_DATA"/>
+ <value value="45" name="A7XX_SP_LB_4_DATA"/>
+ <value value="46" name="A7XX_SP_LB_5_DATA"/>
+ <value value="47" name="A7XX_SP_LB_6_DATA"/>
+ <value value="48" name="A7XX_SP_LB_7_DATA"/>
+ <value value="49" name="A7XX_SP_CB_RAM"/>
+ <value value="50" name="A7XX_SP_LB_13_DATA"/>
+ <value value="51" name="A7XX_SP_LB_14_DATA"/>
+ <value value="52" name="A7XX_SP_INST_TAG"/>
+ <value value="53" name="A7XX_SP_INST_DATA_2"/>
+ <value value="54" name="A7XX_SP_TMO_TAG"/>
+ <value value="55" name="A7XX_SP_SMO_TAG"/>
+ <value value="56" name="A7XX_SP_STATE_DATA"/>
+ <value value="57" name="A7XX_SP_HWAVE_RAM"/>
+ <value value="58" name="A7XX_SP_L0_INST_BUF"/>
+ <value value="59" name="A7XX_SP_LB_8_DATA"/>
+ <value value="60" name="A7XX_SP_LB_9_DATA"/>
+ <value value="61" name="A7XX_SP_LB_10_DATA"/>
+ <value value="62" name="A7XX_SP_LB_11_DATA"/>
+ <value value="63" name="A7XX_SP_LB_12_DATA"/>
+ <value value="64" name="A7XX_HLSQ_DATAPATH_DSTR_META"/>
+ <value value="67" name="A7XX_HLSQ_L2STC_TAG_RAM"/>
+ <value value="68" name="A7XX_HLSQ_L2STC_INFO_CMD"/>
+ <value value="69" name="A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="70" name="A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="71" name="A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
+ <value value="72" name="A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
+ <value value="73" name="A7XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="74" name="A7XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="75" name="A7XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="76" name="A7XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="77" name="A7XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="78" name="A7XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="79" name="A7XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="80" name="A7XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="81" name="A7XX_HLSQ_CPS_MISC_RAM_1"/>
+ <value value="82" name="A7XX_HLSQ_INST_RAM"/>
+ <value value="83" name="A7XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="84" name="A7XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="85" name="A7XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="86" name="A7XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="87" name="A7XX_HLSQ_INST_RAM_TAG"/>
+ <value value="88" name="A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="89" name="A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="90" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
+ <value value="91" name="A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
+ <value value="92" name="A7XX_HLSQ_INST_RAM_1"/>
+ <value value="93" name="A7XX_HLSQ_STPROC_META"/>
+ <value value="94" name="A7XX_HLSQ_BV_BE_META"/>
+ <value value="95" name="A7XX_HLSQ_INST_RAM_2"/>
+ <value value="96" name="A7XX_HLSQ_DATAPATH_META"/>
+ <value value="97" name="A7XX_HLSQ_FRONTEND_META"/>
+ <value value="98" name="A7XX_HLSQ_INDIRECT_META"/>
+ <value value="99" name="A7XX_HLSQ_BACKEND_META"/>
+</enum>
+
+<enum name="a7xx_state_location">
+ <value value="0" name="A7XX_HLSQ_STATE"/>
+ <value value="1" name="A7XX_HLSQ_DP"/>
+ <value value="2" name="A7XX_SP_TOP"/>
+ <value value="3" name="A7XX_USPTP"/>
+ <value value="4" name="A7XX_HLSQ_DP_STR"/>
+</enum>
+
+<enum name="a7xx_pipe">
+ <value value="0" name="A7XX_PIPE_NONE"/>
+ <value value="1" name="A7XX_PIPE_BR"/>
+ <value value="2" name="A7XX_PIPE_BV"/>
+ <value value="3" name="A7XX_PIPE_LPAC"/>
+</enum>
+
+<enum name="a7xx_cluster">
+ <value value="0" name="A7XX_CLUSTER_NONE"/>
+ <value value="1" name="A7XX_CLUSTER_FE"/>
+ <value value="2" name="A7XX_CLUSTER_SP_VS"/>
+ <value value="3" name="A7XX_CLUSTER_PC_VS"/>
+ <value value="4" name="A7XX_CLUSTER_GRAS"/>
+ <value value="5" name="A7XX_CLUSTER_SP_PS"/>
+ <value value="6" name="A7XX_CLUSTER_VPC_PS"/>
+ <value value="7" name="A7XX_CLUSTER_PS"/>
+</enum>
+
+<enum name="a7xx_debugbus_id">
+ <value value="1" name="A7XX_DBGBUS_CP_0_0"/>
+ <value value="2" name="A7XX_DBGBUS_CP_0_1"/>
+ <value value="3" name="A7XX_DBGBUS_RBBM"/>
+ <value value="5" name="A7XX_DBGBUS_GBIF_GX"/>
+ <value value="6" name="A7XX_DBGBUS_GBIF_CX"/>
+ <value value="7" name="A7XX_DBGBUS_HLSQ"/>
+ <value value="9" name="A7XX_DBGBUS_UCHE_0"/>
+ <value value="10" name="A7XX_DBGBUS_UCHE_1"/>
+ <value value="13" name="A7XX_DBGBUS_TESS_BR"/>
+ <value value="14" name="A7XX_DBGBUS_TESS_BV"/>
+ <value value="17" name="A7XX_DBGBUS_PC_BR"/>
+ <value value="18" name="A7XX_DBGBUS_PC_BV"/>
+ <value value="21" name="A7XX_DBGBUS_VFDP_BR"/>
+ <value value="22" name="A7XX_DBGBUS_VFDP_BV"/>
+ <value value="25" name="A7XX_DBGBUS_VPC_BR"/>
+ <value value="26" name="A7XX_DBGBUS_VPC_BV"/>
+ <value value="29" name="A7XX_DBGBUS_TSE_BR"/>
+ <value value="30" name="A7XX_DBGBUS_TSE_BV"/>
+ <value value="33" name="A7XX_DBGBUS_RAS_BR"/>
+ <value value="34" name="A7XX_DBGBUS_RAS_BV"/>
+ <value value="37" name="A7XX_DBGBUS_VSC"/>
+ <value value="39" name="A7XX_DBGBUS_COM_0"/>
+ <value value="43" name="A7XX_DBGBUS_LRZ_BR"/>
+ <value value="44" name="A7XX_DBGBUS_LRZ_BV"/>
+ <value value="47" name="A7XX_DBGBUS_UFC_0"/>
+ <value value="48" name="A7XX_DBGBUS_UFC_1"/>
+ <value value="55" name="A7XX_DBGBUS_GMU_GX"/>
+ <value value="59" name="A7XX_DBGBUS_DBGC"/>
+ <value value="60" name="A7XX_DBGBUS_CX"/>
+ <value value="61" name="A7XX_DBGBUS_GMU_CX"/>
+ <value value="62" name="A7XX_DBGBUS_GPC_BR"/>
+ <value value="63" name="A7XX_DBGBUS_GPC_BV"/>
+ <value value="66" name="A7XX_DBGBUS_LARC"/>
+ <value value="68" name="A7XX_DBGBUS_HLSQ_SPTP"/>
+ <value value="70" name="A7XX_DBGBUS_RB_0"/>
+ <value value="71" name="A7XX_DBGBUS_RB_1"/>
+ <value value="72" name="A7XX_DBGBUS_RB_2"/>
+ <value value="73" name="A7XX_DBGBUS_RB_3"/>
+ <value value="74" name="A7XX_DBGBUS_RB_4"/>
+ <value value="75" name="A7XX_DBGBUS_RB_5"/>
+ <value value="102" name="A7XX_DBGBUS_UCHE_WRAPPER"/>
+ <value value="106" name="A7XX_DBGBUS_CCU_0"/>
+ <value value="107" name="A7XX_DBGBUS_CCU_1"/>
+ <value value="108" name="A7XX_DBGBUS_CCU_2"/>
+ <value value="109" name="A7XX_DBGBUS_CCU_3"/>
+ <value value="110" name="A7XX_DBGBUS_CCU_4"/>
+ <value value="111" name="A7XX_DBGBUS_CCU_5"/>
+ <value value="138" name="A7XX_DBGBUS_VFD_BR_0"/>
+ <value value="139" name="A7XX_DBGBUS_VFD_BR_1"/>
+ <value value="140" name="A7XX_DBGBUS_VFD_BR_2"/>
+ <value value="141" name="A7XX_DBGBUS_VFD_BR_3"/>
+ <value value="142" name="A7XX_DBGBUS_VFD_BR_4"/>
+ <value value="143" name="A7XX_DBGBUS_VFD_BR_5"/>
+ <value value="144" name="A7XX_DBGBUS_VFD_BR_6"/>
+ <value value="145" name="A7XX_DBGBUS_VFD_BR_7"/>
+ <value value="202" name="A7XX_DBGBUS_VFD_BV_0"/>
+ <value value="203" name="A7XX_DBGBUS_VFD_BV_1"/>
+ <value value="204" name="A7XX_DBGBUS_VFD_BV_2"/>
+ <value value="205" name="A7XX_DBGBUS_VFD_BV_3"/>
+ <value value="234" name="A7XX_DBGBUS_USP_0"/>
+ <value value="235" name="A7XX_DBGBUS_USP_1"/>
+ <value value="236" name="A7XX_DBGBUS_USP_2"/>
+ <value value="237" name="A7XX_DBGBUS_USP_3"/>
+ <value value="238" name="A7XX_DBGBUS_USP_4"/>
+ <value value="239" name="A7XX_DBGBUS_USP_5"/>
+ <value value="266" name="A7XX_DBGBUS_TP_0"/>
+ <value value="267" name="A7XX_DBGBUS_TP_1"/>
+ <value value="268" name="A7XX_DBGBUS_TP_2"/>
+ <value value="269" name="A7XX_DBGBUS_TP_3"/>
+ <value value="270" name="A7XX_DBGBUS_TP_4"/>
+ <value value="271" name="A7XX_DBGBUS_TP_5"/>
+ <value value="272" name="A7XX_DBGBUS_TP_6"/>
+ <value value="273" name="A7XX_DBGBUS_TP_7"/>
+ <value value="274" name="A7XX_DBGBUS_TP_8"/>
+ <value value="275" name="A7XX_DBGBUS_TP_9"/>
+ <value value="276" name="A7XX_DBGBUS_TP_10"/>
+ <value value="277" name="A7XX_DBGBUS_TP_11"/>
+ <value value="330" name="A7XX_DBGBUS_USPTP_0"/>
+ <value value="331" name="A7XX_DBGBUS_USPTP_1"/>
+ <value value="332" name="A7XX_DBGBUS_USPTP_2"/>
+ <value value="333" name="A7XX_DBGBUS_USPTP_3"/>
+ <value value="334" name="A7XX_DBGBUS_USPTP_4"/>
+ <value value="335" name="A7XX_DBGBUS_USPTP_5"/>
+ <value value="336" name="A7XX_DBGBUS_USPTP_6"/>
+ <value value="337" name="A7XX_DBGBUS_USPTP_7"/>
+ <value value="338" name="A7XX_DBGBUS_USPTP_8"/>
+ <value value="339" name="A7XX_DBGBUS_USPTP_9"/>
+ <value value="340" name="A7XX_DBGBUS_USPTP_10"/>
+ <value value="341" name="A7XX_DBGBUS_USPTP_11"/>
+ <value value="396" name="A7XX_DBGBUS_CCHE_0"/>
+ <value value="397" name="A7XX_DBGBUS_CCHE_1"/>
+ <value value="398" name="A7XX_DBGBUS_CCHE_2"/>
+ <value value="408" name="A7XX_DBGBUS_VPC_DSTR_0"/>
+ <value value="409" name="A7XX_DBGBUS_VPC_DSTR_1"/>
+ <value value="410" name="A7XX_DBGBUS_VPC_DSTR_2"/>
+ <value value="411" name="A7XX_DBGBUS_HLSQ_DP_STR_0"/>
+ <value value="412" name="A7XX_DBGBUS_HLSQ_DP_STR_1"/>
+ <value value="413" name="A7XX_DBGBUS_HLSQ_DP_STR_2"/>
+ <value value="414" name="A7XX_DBGBUS_HLSQ_DP_STR_3"/>
+ <value value="415" name="A7XX_DBGBUS_HLSQ_DP_STR_4"/>
+ <value value="416" name="A7XX_DBGBUS_HLSQ_DP_STR_5"/>
+ <value value="443" name="A7XX_DBGBUS_UFC_DSTR_0"/>
+ <value value="444" name="A7XX_DBGBUS_UFC_DSTR_1"/>
+ <value value="445" name="A7XX_DBGBUS_UFC_DSTR_2"/>
+ <value value="446" name="A7XX_DBGBUS_CGC_SUBCORE"/>
+ <value value="447" name="A7XX_DBGBUS_CGC_CORE"/>
+</enum>
+
+</database>
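
The enums in this file are coordinates for state snapshotting rather than perfcounters: a (pipe, cluster, state location, statetype) tuple tells a crash dumper which slice of context state to read, and a7xx_debugbus_id names the blocks whose internal trace buses can be sampled. Note that the debugbus ids are sparse (3 is followed by 5, 44 by 47, and so on), so a consumer has to walk an explicit table of valid ids rather than a dense 0..N range. The loop below is a minimal sketch of that pattern only; dump_debugbus_block() is a stub standing in for the real DBGC register sequence, which this patch does not show.

    /*
     * Sketch: the debugbus ids are sparse, so a crash-dump style
     * consumer iterates an explicit table of valid blocks. The values
     * here are a hand-picked subset of a7xx_debugbus_id above.
     */
    #include <stdio.h>

    enum a7xx_debugbus_id {
            A7XX_DBGBUS_CP_0_0   = 1,
            A7XX_DBGBUS_RBBM     = 3,
            A7XX_DBGBUS_GBIF_GX  = 5,
            A7XX_DBGBUS_VSC      = 37,
            A7XX_DBGBUS_CGC_CORE = 447,
    };

    static const enum a7xx_debugbus_id dump_blocks[] = {
            A7XX_DBGBUS_CP_0_0,
            A7XX_DBGBUS_RBBM,
            A7XX_DBGBUS_GBIF_GX,
            A7XX_DBGBUS_VSC,
            A7XX_DBGBUS_CGC_CORE,
    };

    /* Stub: the real flow programs DBGC select registers and reads
     * back per-block trace data. */
    static void dump_debugbus_block(enum a7xx_debugbus_id id)
    {
            printf("dumping debugbus block %d\n", id);
    }

    int main(void)
    {
            unsigned i;

            for (i = 0; i < sizeof(dump_blocks) / sizeof(dump_blocks[0]); i++)
                    dump_debugbus_block(dump_blocks[i]);
            return 0;
    }

A real dumper would record each id alongside the sampled data so post-processing tools can attribute the trace to the right block.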
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml
new file mode 100644
index 000000000000..9bf78b0a854b
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml
@@ -0,0 +1,1030 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a7xx_cp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
+ <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
+ <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
+ <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
+ <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
+ <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
+ <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
+ <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
+ <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
+ <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
+ <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
+ <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
+ <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
+ <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
+ <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
+ <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
+ <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
+ <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
+ <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
+ <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
+ <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
+ <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
+ <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
+ <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
+</enum>
+
+<enum name="a7xx_rbbm_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a7xx_pc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="A7XX_PERF_PC_RESERVED"/>
+ <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
+ <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
+ <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
+ <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
+ <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
+ <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
+ <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
+ <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
+ <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
+ <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
+ <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
+ <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
+ <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
+ <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
+ <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
+ <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
+ <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
+ <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
+ <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
+ <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
+ <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
+ <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
+ <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
+ <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
+ <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
+ <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
+ <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
+ <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
+ <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
+ <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
+ <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
+ <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
+ <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
+</enum>
+
+<enum name="a7xx_vfd_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
+ <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
+ <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
+</enum>
+
+<enum name="a7xx_hlsq_perfcounter_select">
+ <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+ <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
+ <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
+ <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
+ <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
+ <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
+ <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
+ <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
+ <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
+ <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
+ <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
+ <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
+ <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
+ <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
+ <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
+ <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
+ <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
+ <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
+ <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
+ <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
+ <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
+ <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
+ <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
+ <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
+ <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
+ <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
+ <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
+ <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
+ <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
+ <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
+ <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
+ <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
+ <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
+ <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
+ <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
+ <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
+</enum>
+
+<enum name="a7xx_vpc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
+ <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
+ <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
+ <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
+ <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
+ <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
+ <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
+ <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
+ <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
+ <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
+ <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
+ <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
+ <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
+ <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
+ <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
+ <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
+ <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
+</enum>
+
+<enum name="a7xx_tse_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
+ <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a7xx_ras_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
+ <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
+ <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
+ <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
+ <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
+ <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
+ <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
+ <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
+ <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
+ <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
+ <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
+ <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
+ <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
+ <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
+ <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
+ <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
+ <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
+ <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
+ <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
+</enum>
+
+<enum name="a7xx_uche_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
+ <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
+ <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
+ <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
+ <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
+ <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
+ <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
+ <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
+ <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
+ <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
+ <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
+ <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
+ <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
+ <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
+ <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
+ <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
+ <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
+ <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
+ <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
+ <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
+ <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
+</enum>
+
+<enum name="a7xx_tp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
+ <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
+ <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
+ <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
+ <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
+ <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
+ <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
+ <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
+ <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
+ <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
+ <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
+ <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
+ <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
+ <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
+ <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
+ <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
+ <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
+ <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
+ <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
+ <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
+ <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
+ <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
+ <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
+ <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
+ <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
+ <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
+ <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
+ <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
+ <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
+ <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
+ <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
+ <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
+ <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
+ <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
+ <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
+ <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
+ <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
+ <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
+ <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
+ <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
+ <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
+ <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
+ <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
+ <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
+ <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
+ <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
+ <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
+ <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
+ <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
+ <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
+ <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
+ <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
+ <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
+ <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
+ <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
+ <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
+ <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
+ <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
+ <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
+ <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
+ <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
+ <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
+ <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
+ <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
+ <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
+ <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
+ <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
+ <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
+ <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
+ <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
+ <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
+ <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
+ <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
+ <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
+ <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
+ <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
+ <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
+</enum>
+
+<enum name="a7xx_sp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
+ <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
+ <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
+ <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
+ <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
+ <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
+ <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
+ <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
+ <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
+ <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
+ <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
+ <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
+ <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
+ <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
+ <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
+ <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
+ <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
+ <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
+ <value value="99" name="A7XX_PERF_SP_QUADS"/>
+ <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
+ <value value="101" name="A7XX_PERF_SP_PIXELS"/>
+ <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
+ <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
+ <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
+ <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
+ <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
+ <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
+ <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
+ <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
+ <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
+ <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
+ <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
+ <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
+ <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
+ <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
+ <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
+ <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
+ <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
+ <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
+ <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
+ <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
+ <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
+ <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
+ <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
+ <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
+ <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
+ <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
+ <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
+ <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
+ <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
+ <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
+ <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
+ <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
+ <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
+ <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
+ <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
+ <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
+ <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
+ <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
+ <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
+ <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
+ <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
+ <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
+ <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
+ <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
+ <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
+ <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
+ <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
+ <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
+ <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
+</enum>
+
+<enum name="a7xx_rb_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="A7XX_PERF_RB_Z_READ"/>
+ <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
+ <value value="14" name="A7XX_PERF_RB_C_READ"/>
+ <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
+ <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
+ <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
+ <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
+ <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
+ <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
+ <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
+ <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
+ <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
+ <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
+ <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
+ <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
+</enum>
+
+<enum name="a7xx_vsc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
+ <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a7xx_ccu_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
+ <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
+ <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
+ <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
+ <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
+ <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
+ <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
+ <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
+ <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
+ <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
+ <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
+ <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
+ <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
+ <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
+ <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
+ <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
+</enum>
+
+<enum name="a7xx_lrz_perfcounter_select">
+ <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
+ <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
+ <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
+ <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
+</enum>
+
+<enum name="a7xx_cmp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
+ <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
+ <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
+ <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
+ <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
+ <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
+ <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
+ <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
+ <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
+ <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
+ <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
+ <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
+ <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
+</enum>
+
+<enum name="a7xx_gbif_perfcounter_select">
+ <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
+ <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
+ <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
+ <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
+ <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
+ <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
+ <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
+ <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
+ <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
+ <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
+ <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
+ <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
+ <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
+ <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
+ <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
+ <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
+ <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
+ <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
+ <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
+ <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
+ <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
+ <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
+ <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
+ <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
+ <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
+ <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
+ <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
+ <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
+ <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
+ <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
+ <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
+ <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
+ <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
+ <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
+ <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
+ <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
+ <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
+ <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
+ <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
+ <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
+ <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
+ <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
+ <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
+ <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
+ <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
+ <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
+ <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
+ <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
+ <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
+ <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
+</enum>
+
+<enum name="a7xx_ufc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
+ <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
+ <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
+ <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
+ <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
+ <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
+ <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
+ <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
+ <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
+ <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
+ <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
+ <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
+ <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
+ <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
+ <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
+ <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
+ <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
+ <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
+ <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
+ <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
+ <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
+ <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
+ <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
+ <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
+ <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
+ <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
+ <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
+ <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
+ <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
+ <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
+ <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
+ <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
+ <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
+ <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
+ <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
+ <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
+ <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
+ <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
+ <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
+ <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
+ <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
+ <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
+ <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
+ <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
+ <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
+ <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
+ <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
+ <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
+ <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
+ <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
+ <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
+ <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
+ <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
+ <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
+ <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
+ <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
+</enum>
+
+</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 462713401622..7abc08635495 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -21,9 +21,9 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="HLSQ_FLUSH" value="7" variants="A3XX-A4XX"/>
<value name="VIZQUERY_END" value="8" variants="A2XX"/>
<value name="SC_WAIT_WC" value="9" variants="A2XX"/>
- <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX"/>
- <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX"/>
- <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX"/>
+ <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX-"/>
+ <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX-"/>
+ <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX-"/>
<!-- Not sure that these 4 events don't have the same meaning as on A5XX+ -->
<value name="RST_PIX_CNT" value="13" variants="A2XX-A4XX"/>
<value name="RST_VTX_CNT" value="14" variants="A2XX-A4XX"/>
@@ -31,8 +31,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="STAT_EVENT" value="16" variants="A2XX-A4XX"/>
<value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="20" variants="A2XX-A4XX"/>
<doc>
- If A6XX_RB_SAMPLE_COUNT_CONTROL.copy is true, writes OQ Z passed
- sample counts to RB_SAMPLE_COUNT_ADDR. This writes to main
+ If A6XX_RB_SAMPLE_COUNTER_CNTL.copy is true, writes OQ Z passed
+ sample counts to RB_SAMPLE_COUNTER_BASE. This writes to main
memory, skipping UCHE.
</doc>
<value name="ZPASS_DONE" value="21"/>
@@ -98,6 +98,13 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="BLIT" value="30" variants="A5XX-"/>
<doc>
+ Flip between the primary and secondary LRZ buffers. This is used
+ for concurrent binning, so that BV can write to one buffer while
+ BR reads from the other.
+ </doc>
+ <value name="LRZ_FLIP_BUFFER" value="36" variants="A7XX"/>
+
+ <doc>
	    Clears based on the GRAS_LRZ_CNTL configuration; it can clear the
	    fast-clear buffer or the LRZ direction.
LRZ direction is stored at lrz_fc_offset + 0x200, has 1 byte which
@@ -114,6 +121,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
<value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
<value name="UNK_40" value="40" variants="A7XX"/>
+ <value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/>
<value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
<value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
<value name="UNK_2C" value="44" variants="A5XX-"/>
@@ -372,7 +380,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_LOAD_STATE" value="0x30" variants="A3XX"/>
<value name="CP_LOAD_STATE4" value="0x30" variants="A4XX-A5XX"/>
	<doc>Conditionally load an IB based on a flag, prefetch enabled</doc>
- <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a"/>
+ <value name="CP_COND_INDIRECT_BUFFER_PFE" value="0x3a" variants="A3XX-A5XX"/>
	<doc>Conditionally load an IB based on a flag, prefetch disabled</doc>
<value name="CP_COND_INDIRECT_BUFFER_PFD" value="0x32" variants="A3XX"/>
<doc>Load a buffer with pre-fetch enabled</doc>
@@ -538,7 +546,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_LOAD_STATE6_GEOM" value="0x32" variants="A6XX-"/>
<value name="CP_LOAD_STATE6_FRAG" value="0x34" variants="A6XX-"/>
<!--
- Note: For IBO state (Image/SSBOs) which have shared state across
+ Note: For UAV state (Image/SSBOs) which have shared state across
shader stages, for 3d pipeline CP_LOAD_STATE6 is used. But for
compute shaders, CP_LOAD_STATE6_FRAG is used. Possibly they are
	interchangeable.
@@ -567,7 +575,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="IN_PREEMPT" value="0x0f" variants="A6XX-"/>
<!-- TODO do these exist on A5xx? -->
- <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX"/>
+ <value name="CP_SCRATCH_WRITE" value="0x4c" variants="A6XX-"/>
<value name="CP_REG_TO_MEM_OFFSET_MEM" value="0x74" variants="A6XX-"/>
<value name="CP_REG_TO_MEM_OFFSET_REG" value="0x72" variants="A6XX-"/>
<value name="CP_WAIT_MEM_GTE" value="0x14" variants="A6XX"/>
@@ -650,6 +658,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<doc>Reset various on-chip state used for synchronization</doc>
<value name="CP_RESET_CONTEXT_STATE" value="0x1f" variants="A7XX-"/>
+
+ <doc>Invalidates the "CCHE" introduced on a740</doc>
+ <value name="CP_CCHE_INVALIDATE" value="0x3a" variants="A7XX-"/>
+
+ <value name="CP_SCOPE_CNTL" value="0x6c" variants="A7XX-"/>
</enum>
@@ -792,14 +805,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value name="SB6_GS_SHADER" value="0xb"/>
<value name="SB6_FS_SHADER" value="0xc"/>
<value name="SB6_CS_SHADER" value="0xd"/>
- <value name="SB6_IBO" value="0xe"/>
- <value name="SB6_CS_IBO" value="0xf"/>
+ <value name="SB6_UAV" value="0xe"/>
+ <value name="SB6_CS_UAV" value="0xf"/>
</enum>
<enum name="a6xx_state_type">
<value name="ST6_SHADER" value="0"/>
<value name="ST6_CONSTANTS" value="1"/>
<value name="ST6_UBO" value="2"/>
- <value name="ST6_IBO" value="3"/>
+ <value name="ST6_UAV" value="3"/>
</enum>
<enum name="a6xx_state_src">
<value name="SS6_DIRECT" value="0"/>
@@ -1121,39 +1134,93 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<enum name="a7xx_abs_mask_mode">
+ <value name="ABS_MASK" value="0x1"/>
+ <value name="NO_ABS_MASK" value="0x0"/>
+</enum>
+
<domain name="CP_SET_BIN_DATA5" width="32">
<reg32 offset="0" name="0">
+ <bitfield name="VSC_MASK" low="0" high="15" type="hex">
+ <doc>
+ A mask of bins, starting at VSC_N, whose
+ visibility is OR'd together. A value of 0 is
+ interpreted as 1 (i.e. just use VSC_N for
+			visibility) for backwards compatibility. Only
+ exists on a7xx.
+ </doc>
+ </bitfield>
<!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ <bitfield name="ABS_MASK" pos="28" type="a7xx_abs_mask_mode" addvariant="yes">
+ <doc>
+ If this field is 1, VSC_MASK and VSC_N are
+ ignored and instead a new ordinal immediately
+ after specifies the full 32-bit mask of bins
+ to use. The mask is "absolute" instead of
+ relative to VSC_N.
+ </doc>
+ </bitfield>
</reg32>
- <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
- <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
- <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="5" name="5">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
- <!--
- a7xx adds a few more addresses to the end of the pkt
- -->
- <reg64 offset="7" name="7"/>
- <reg64 offset="9" name="9"/>
+ <stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="7" name="7"/>
+ <reg64 offset="9" name="9"/>
+ </stripe>
+ <stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
+ <reg32 offset="1" name="ABS_MASK"/>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
+ </reg32>
+ <!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
+ <reg32 offset="6" name="6">
+ <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
+ </reg32>
+ <reg32 offset="7" name="7">
+ <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
+ </reg32>
+ <!--
+ a7xx adds a few more addresses to the end of the pkt
+ -->
+ <reg64 offset="8" name="8"/>
+ <reg64 offset="10" name="10"/>
+ </stripe>
</domain>
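
The two stripes above describe the same packet with the payload shifted down by
one dword when ABS_MASK is set. A minimal C sketch of that encoding difference;
emit_dword() is a hypothetical helper, the trailing ordinals are elided, and the
bit positions are the ones from the bitfields above:

	#include <stdint.h>

	void emit_dword(uint32_t v);	/* hypothetical: appends one dword to the cmdstream */

	static void emit_set_bin_data5(uint32_t vsc_size, uint32_t vsc_n,
				       int use_abs_mask, uint32_t abs_mask,
				       uint16_t vsc_mask,
				       uint64_t bin_data_addr, uint64_t bin_size_addr)
	{
		uint32_t dw0 = (vsc_size << 16) | (vsc_n << 22);

		if (use_abs_mask) {
			dw0 |= 1u << 28;		/* ABS_MASK: VSC_MASK/VSC_N ignored */
			emit_dword(dw0);
			emit_dword(abs_mask);		/* extra ordinal: full 32-bit bin mask */
		} else {
			emit_dword(dw0 | vsc_mask);	/* relative mask in bits 0..15 */
		}

		/* All remaining ordinals shift down by one in ABS_MASK mode: */
		emit_dword((uint32_t)bin_data_addr);		/* BIN_DATA_ADDR_LO */
		emit_dword((uint32_t)(bin_data_addr >> 32));	/* BIN_DATA_ADDR_HI */
		emit_dword((uint32_t)bin_size_addr);		/* BIN_SIZE_ADDRESS_LO */
		emit_dword((uint32_t)(bin_size_addr >> 32));	/* BIN_SIZE_ADDRESS_HI */
		/* ...BIN_PRIM_STRM and the trailing a7xx reg64 pairs follow. */
	}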
<domain name="CP_SET_BIN_DATA5_OFFSET" width="32">
@@ -1164,23 +1231,42 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
stream is recorded.
</doc>
<reg32 offset="0" name="0">
+ <bitfield name="VSC_MASK" low="0" high="15" type="hex"/>
<!-- equiv to PC_VSTREAM_CONTROL.SIZE on a3xx/a4xx: -->
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<!-- equiv to PC_VSTREAM_CONTROL.N on a3xx/a4xx: -->
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
+ <bitfield name="ABS_MASK" pos="28" type="a7xx_abs_mask_mode" addvariant="yes"/>
</reg32>
- <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
- <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="2" name="2">
- <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
- <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
- </reg32>
+ <stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="1" name="1">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ </stripe>
+ <stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
+ <reg32 offset="1" name="ABS_MASK"/>
+ <!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
+ <reg32 offset="2" name="2">
+ <bitfield name="BIN_DATA_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
+ <reg32 offset="3" name="3">
+ <bitfield name="BIN_SIZE_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ <!-- BIN_DATA2_ADDR -> VSC_PIPE[p].DATA2_ADDRESS -->
+ <reg32 offset="4" name="4">
+ <bitfield name="BIN_DATA2_OFFSET" low="0" high="31" type="uint"/>
+ </reg32>
+ </stripe>
</domain>
<domain name="CP_REG_RMW" width="32">
@@ -1198,6 +1284,9 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</doc>
<reg32 offset="0" name="0">
<bitfield name="DST_REG" low="0" high="17" type="hex"/>
+ <bitfield name="DST_SCRATCH" pos="19" type="boolean" varset="chip" variants="A7XX-"/>
+ <!-- skip implied CP_WAIT_FOR_IDLE + CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="23" type="boolean" varset="chip" variants="A7XX-"/>
<bitfield name="ROTATE" low="24" high="28" type="uint"/>
<bitfield name="SRC1_ADD" pos="29" type="boolean"/>
<bitfield name="SRC1_IS_REG" pos="30" type="boolean"/>
@@ -1348,6 +1437,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="SCRATCH" low="20" high="22" type="uint"/>
<!-- number of registers/dwords copied is CNT + 1. -->
<bitfield name="CNT" low="24" high="26" type="uint"/>
+ <!-- skip implied CP_WAIT_FOR_IDLE + CP_WAIT_FOR_ME -->
+ <bitfield name="SKIP_WAIT_FOR_ME" pos="27" type="boolean" varset="chip" variants="A7XX-"/>
</reg32>
</domain>
@@ -1655,8 +1746,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="WRITE_SAMPLE_COUNT" pos="12" type="boolean"/>
<!-- Write sample count at (iova + 16) -->
<bitfield name="SAMPLE_COUNT_END_OFFSET" pos="13" type="boolean"/>
- <!-- *(iova + 8) = *(iova + 16) - *iova -->
- <bitfield name="WRITE_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
+ <!-- *(iova + 8) += *(iova + 16) - *iova -->
+ <bitfield name="WRITE_ACCUM_SAMPLE_COUNT_DIFF" pos="14" type="boolean"/>
<!-- Next 4 flags are valid to set only when concurrent binning is enabled -->
<!-- Increment 16b BV counter. Valid only in BV pipe -->
@@ -1670,15 +1761,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="WRITE_DST" pos="24" type="event_write_dst" addvariant="yes"/>
<!-- Writes into WRITE_DST from WRITE_SRC. RB_DONE_TS requires WRITE_ENABLED. -->
<bitfield name="WRITE_ENABLED" pos="27" type="boolean"/>
+ <bitfield name="IRQ" pos="31" type="boolean"/>
</reg32>
<stripe varset="event_write_dst" variants="EV_DST_RAM">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_0_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_0_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="1" type="waddress"/>
<reg32 offset="3" name="3">
<bitfield name="PAYLOAD_0" low="0" high="31"/>
</reg32>
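
The corrected comment above ('+=' rather than '=') says the CP accumulates the
sample-count difference instead of overwriting it. In C terms, over the three
qwords at the event address (a sketch; the struct is only a view of the offsets
documented in the comments above):

	#include <stdint.h>

	/* Layout implied by the comments above: begin count at the iova,
	 * accumulated difference at iova + 8, end count at iova + 16. */
	struct zpass_counts {
		uint64_t begin;	/* *iova */
		uint64_t accum;	/* *(iova + 8) */
		uint64_t end;	/* *(iova + 16) */
	};

	/* What WRITE_ACCUM_SAMPLE_COUNT_DIFF makes the CP do: */
	static void accum_sample_count_diff(struct zpass_counts *c)
	{
		c->accum += c->end - c->begin;	/* accumulate, not overwrite */
	}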
@@ -1773,13 +1860,23 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_SET_MARKER" width="32" varset="chip" prefix="chip" variants="A6XX-">
<doc>Tell CP the current operation mode, indicates save and restore procedure</doc>
+ <enum name="set_marker_mode">
+ <value value="0" name="SET_RENDER_MODE"/>
+ <!-- IFPC - inter-frame power collapse -->
+ <value value="1" name="SET_IFPC_MODE"/>
+ </enum>
+ <enum name="a6xx_ifpc_mode">
+ <value value="0" name="IFPC_ENABLE"/>
+ <value value="1" name="IFPC_DISABLE"/>
+ </enum>
<enum name="a6xx_marker">
- <value value="1" name="RM6_BYPASS"/>
- <value value="2" name="RM6_BINNING"/>
- <value value="4" name="RM6_GMEM"/>
- <value value="5" name="RM6_ENDVIS"/>
- <value value="6" name="RM6_RESOLVE"/>
- <value value="7" name="RM6_YIELD"/>
+ <value value="1" name="RM6_DIRECT_RENDER"/>
+ <value value="2" name="RM6_BIN_VISIBILITY"/>
+ <value value="3" name="RM6_BIN_DIRECT"/>
+ <value value="4" name="RM6_BIN_RENDER_START"/>
+ <value value="5" name="RM6_BIN_END_OF_DRAWS"/>
+ <value value="6" name="RM6_BIN_RESOLVE"/>
+ <value value="7" name="RM6_BIN_RENDER_END"/>
<value value="8" name="RM6_COMPUTE"/>
<value value="0xc" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) -->
@@ -1789,23 +1886,40 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
-->
<value value="0xd" name="RM6_IB1LIST_START"/>
<value value="0xe" name="RM6_IB1LIST_END"/>
- <!-- IFPC - inter-frame power collapse -->
- <value value="0x100" name="RM6_IFPC_ENABLE"/>
- <value value="0x101" name="RM6_IFPC_DISABLE"/>
</enum>
<reg32 offset="0" name="0">
+ <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) -->
+ <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/>
+
+ <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+ <!-- used by preemption to determine if GMEM needs to be saved or not -->
+ <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+
+ <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/>
+
<!--
- NOTE: blob driver and some versions of freedreno/turnip set
- b4, which is unused (at least by current sqe fw), but interferes
- with parsing if we extend the size of the bitfield to include
- b8 (only sent by kernel mode driver). Really, the way the
- parsing works in the firmware, only b0-b3 are considered, but
- if b8 is set, the low bits are interpreted differently. To
- model this, without getting confused by spurious b4, this is
- described as two overlapping bitfields:
- -->
- <bitfield name="MODE" low="0" high="8" type="a6xx_marker"/>
- <bitfield name="MARKER" low="0" high="3" type="a6xx_marker"/>
+ CP_SET_MARKER is used with these bits to create a
+ critical section around a workaround for ray tracing.
+ The workaround happens after BVH building, and appears
+			to invalidate the RTU's BVH node cache. By hooking
+			subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS, it makes
+			sure that only one of BR/BV/LPAC executes the workaround
+			at a time, and that no draws using RT execute on BV/LPAC
+			while the workaround runs on BR (and vice versa: no RT
+			draws run on BV/BR while the workaround runs on LPAC).
+ The blob usage is:
+
+ CP_SET_MARKER(RT_WA_START)
+ ... workaround here ...
+ CP_SET_MARKER(RT_WA_END)
+ ...
+ CP_SET_MARKER(SHADER_USES_RT)
+ CP_DRAW_INDX(...) or CP_EXEC_CS(...)
+ -->
+ <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/>
</reg32>
</domain>
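
Rendered as driver calls, the blob sequence quoted in the comment above looks
roughly like this; emit_set_marker() is a hypothetical helper that writes one
CP_SET_MARKER payload dword, and the bit positions are the ones defined above:

	#include <stdint.h>

	void emit_set_marker(uint32_t dw0);	/* hypothetical one-dword CP_SET_MARKER emit */

	#define A7XX_SHADER_USES_RT	(1u << 9)
	#define A7XX_RT_WA_START	(1u << 10)
	#define A7XX_RT_WA_END		(1u << 11)

	static void rt_bvh_workaround(void)
	{
		emit_set_marker(A7XX_RT_WA_START);
		/* ... BVH node cache invalidation workaround ... */
		emit_set_marker(A7XX_RT_WA_END);

		/* Later, before any draw/dispatch that uses ray tracing: */
		emit_set_marker(A7XX_SHADER_USES_RT);
		/* ... CP_DRAW_INDX(...) or CP_EXEC_CS(...) ... */
	}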
@@ -1832,9 +1946,9 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
If concurrent binning is disabled then BR also does binning so it will also
write the "real" registers in BR.
-->
- <value value="8" name="DRAW_STRM_ADDRESS"/>
- <value value="9" name="DRAW_STRM_SIZE_ADDRESS"/>
- <value value="10" name="PRIM_STRM_ADDRESS"/>
+ <value value="8" name="VSC_PIPE_DATA_DRAW_BASE"/>
+ <value value="9" name="VSC_SIZE_BASE"/>
+ <value value="10" name="VSC_PIPE_DATA_PRIM_BASE"/>
<value value="11" name="UNK_STRM_ADDRESS"/>
<value value="12" name="UNK_STRM_SIZE_ADDRESS"/>
@@ -1935,11 +2049,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
a bitmask of which modes pass the test.
-->
- <!-- RM6_BINNING -->
+ <!-- RM6_BIN_VISIBILITY -->
<bitfield name="BINNING" pos="25" variants="RENDER_MODE" type="boolean"/>
<!-- all others -->
<bitfield name="GMEM" pos="26" variants="RENDER_MODE" type="boolean"/>
- <!-- RM6_BYPASS -->
+ <!-- RM6_DIRECT_RENDER -->
<bitfield name="SYSMEM" pos="27" variants="RENDER_MODE" type="boolean"/>
<bitfield name="BV" pos="25" variants="THREAD_MODE" type="boolean"/>
@@ -2014,10 +2128,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_SET_AMBLE" width="32">
<doc>
- Used by the userspace and kernel drivers to set various IB's
- which are executed during context save/restore for handling
- state that isn't restored by the context switch routine itself.
- </doc>
+		Used by the userspace and kernel drivers to set various IBs
+ which are executed during context save/restore for handling
+ state that isn't restored by the context switch routine itself.
+ </doc>
<enum name="amble_type">
<value name="PREAMBLE_AMBLE_TYPE" value="0">
<doc>Executed unconditionally when switching back to the context.</doc>
@@ -2087,12 +2201,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value name="UNK_EVENT_WRITE" value="0x4"/>
<doc>
Tracks GRAS_LRZ_CNTL::GREATER, GRAS_LRZ_CNTL::DIR, and
- GRAS_LRZ_DEPTH_VIEW with previous values, and if one of
+ GRAS_LRZ_VIEW_INFO with previous values, and if one of
the following is true:
- GRAS_LRZ_CNTL::GREATER has changed
- GRAS_LRZ_CNTL::DIR has changed, the old value is not
CUR_DIR_GE, and the new value is not CUR_DIR_DISABLED
- - GRAS_LRZ_DEPTH_VIEW has changed
+ - GRAS_LRZ_VIEW_INFO has changed
then it does a LRZ_FLUSH with GRAS_LRZ_CNTL::ENABLE
forced to 1.
Only exists in a650_sqe.fw.
@@ -2207,7 +2321,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<domain name="CP_MEM_TO_SCRATCH_MEM" width="32">
<doc>
- Best guess is that it is a faster way to fetch all the VSC_STATE registers
+ Best guess is that it is a faster way to fetch all the VSC_CHANNEL_VISIBILITY registers
		and keep them in a local scratch memory instead of fetching them
		each time IBs are skipped.
</doc>
@@ -2260,6 +2374,16 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<domain name="CP_SCOPE_CNTL" width="32">
+ <enum name="cp_scope">
+ <value value="0" name="INTERRUPTS"/>
+ </enum>
+ <reg32 offset="0" name="0">
+ <bitfield name="DISABLE_PREEMPTION" pos="0" type="boolean"/>
+ <bitfield low="28" high="31" name="SCOPE" type="cp_scope"/>
+ </reg32>
+</domain>
+
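A sketch of building the single CP_SCOPE_CNTL payload dword defined above
(field positions taken from the domain; how the packet is emitted is
driver-specific and assumed here):

	#include <stdint.h>

	enum cp_scope { CP_SCOPE_INTERRUPTS = 0 };

	static uint32_t cp_scope_cntl_dw0(enum cp_scope scope, int disable_preemption)
	{
		/* DISABLE_PREEMPTION is bit 0, SCOPE occupies bits 28..31. */
		return ((uint32_t)scope << 28) | (disable_preemption ? 1u : 0u);
	}

	/* e.g. cp_scope_cntl_dw0(CP_SCOPE_INTERRUPTS, 1) requests that
	 * preemption be disabled within the interrupts scope. */
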
<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
<reg64 offset="0" name="IB_BASE" type="address"/>
<reg32 offset="2" name="2">
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index d2c8c46bb041..4e5ac0f25dea 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -26,6 +26,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00028" name="CTRL_1"/>
<reg32 offset="0x0002c" name="CTRL_2"/>
<reg32 offset="0x00030" name="CTRL_3"/>
+ <reg32 offset="0x001b0" name="CTRL_5"/>
<reg32 offset="0x00034" name="LANE_CFG0"/>
<reg32 offset="0x00038" name="LANE_CFG1"/>
<reg32 offset="0x0003c" name="PLL_CNTRL"/>
@@ -191,11 +192,24 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x01b0" name="COMMON_STATUS_ONE"/>
<reg32 offset="0x01b4" name="COMMON_STATUS_TWO"/>
<reg32 offset="0x01b8" name="BAND_SEL_CAL"/>
+ <!--
+	  Starting with SM8750, the offset moved from 0x01bc to 0x01cc; however,
+	  we keep only one register map. That's not a problem so far,
+	  because this register is not used. The register map should be split
+	  once the register is actually used. The code is commented out to
+	  prevent any misuse due to the change in the offset.
<reg32 offset="0x01bc" name="ICODE_ACCUM_STATUS_LOW"/>
+ <reg32 offset="0x01cc" name="ICODE_ACCUM_STATUS_LOW"/>
+ -->
<reg32 offset="0x01c0" name="ICODE_ACCUM_STATUS_HIGH"/>
<reg32 offset="0x01c4" name="FD_OUT_LOW"/>
<reg32 offset="0x01c8" name="FD_OUT_HIGH"/>
+ <!--
+	  Starting with SM8750, the offset moved from 0x01cc to 0x01bc; however,
+ we keep only one register map. See above comment.
<reg32 offset="0x01cc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ <reg32 offset="0x01bc" name="ALOG_OBSV_BUS_STATUS_1"/>
+ -->
<reg32 offset="0x01d0" name="PLL_MISC_CONFIG"/>
<reg32 offset="0x01d4" name="FLL_CONFIG"/>
<reg32 offset="0x01d8" name="FLL_FREQ_ACQ_TIME"/>
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index c183b1112bc4..0b756da2fec2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -91,20 +91,15 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
static struct drm_framebuffer *
mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-EINVAL);
-
if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
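
The pitch check kept above is plain arithmetic: for mxsfb's single-plane linear
formats the row stride must equal width times bytes per pixel. A worked example
(values illustrative, helper hypothetical):

	#include <stdint.h>

	/* An 800-pixel-wide XRGB8888 buffer (cpp = 4) must have
	 * pitches[0] == 800 * 4 == 3200, or mxsfb_fb_create() above
	 * rejects it with -EINVAL. */
	static int mxsfb_pitch_matches(uint32_t width, uint32_t cpp, uint32_t pitch)
	{
		return width * cpp == pitch;
	}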
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e5d37eee4301..e97e39abf3a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1839,7 +1839,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
backlight = nv_connector->backlight;
if (backlight && backlight->uses_dpcd)
drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
- (u16)backlight->dev->props.brightness);
+ backlight->dev->props.brightness);
#endif
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 9aae26eb7d8f..4a75d146a171 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -245,7 +245,7 @@ nv50_backlight_init(struct nouveau_backlight *bl,
if (nv_conn->type == DCB_CONNECTOR_eDP) {
int ret;
- u16 current_level;
+ u32 current_level;
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 current_mode;
@@ -261,8 +261,9 @@ nv50_backlight_init(struct nouveau_backlight *bl,
NV_DEBUG(drm, "DPCD backlight controls supported on %s\n",
nv_conn->base.name);
- ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, 0, edp_dpcd,
- &current_level, &current_mode);
+ ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info,
+ 0, 0, edp_dpcd,
+ &current_level, &current_mode, false);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index add006fc8d81..e1e542126310 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -295,7 +295,8 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}
- info = drm_get_format_info(dev, mode_cmd);
+ info = drm_get_format_info(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
@@ -320,7 +321,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
@@ -332,6 +333,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb;
@@ -495,7 +497,7 @@ nouveau_display_hpd_work(struct work_struct *work)
if (first_changed_connector)
drm_connector_put(first_changed_connector);
- pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
noop:
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1f506f8b289c..e45f211501f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -67,5 +67,6 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_format_info *,
const struct drm_mode_fb_cmd2 *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 41b7c608c905..edbbda78bac9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -189,7 +189,7 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct nouveau_job_ops nouveau_exec_job_ops = {
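DRM_GPU_SCHED_STAT_NOMINAL was renamed to DRM_GPU_SCHED_STAT_RESET in the scheduler core; a minimal sketch of a timeout handler returning it (foo_timedout_job is hypothetical):

static enum drm_gpu_sched_stat
foo_timedout_job(struct drm_sched_job *sched_job)
{
	/*
	 * Kill or recover the offending context here, then report that
	 * the scheduler went through a reset and may keep running.
	 */
	return DRM_GPU_SCHED_STAT_RESET;
}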
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6ded8c2b6d3b..9f345a008717 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -38,22 +38,16 @@
static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;
-static inline struct nouveau_fence *
-from_fence(struct dma_fence *fence)
-{
- return container_of(fence, struct nouveau_fence, base);
-}
-
static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
-static int
+static bool
nouveau_fence_signal(struct nouveau_fence *fence)
{
- int drop = 0;
+ bool drop = false;
dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
@@ -63,7 +57,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
- drop = 1;
+ drop = true;
}
dma_fence_put(&fence->base);
@@ -77,19 +71,17 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
fence->ops != &nouveau_fence_ops_uevent)
return NULL;
- return from_fence(fence);
+ return to_nouveau_fence(fence);
}
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
- struct nouveau_fence *fence;
+ struct nouveau_fence *fence, *tmp;
unsigned long flags;
spin_lock_irqsave(&fctx->lock, flags);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
@@ -127,23 +119,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
-static int
+static void
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence;
- int drop = 0;
+ struct nouveau_fence *fence, *tmp;
+ bool drop = false;
u32 seq = fctx->read(chan);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if ((int)(seq - fence->base.seqno) < 0)
break;
- drop |= nouveau_fence_signal(fence);
+ if (nouveau_fence_signal(fence))
+ drop = true;
}
- return drop;
+ if (drop)
+ nvif_event_block(&fctx->event);
}
static void
@@ -151,22 +143,16 @@ nouveau_fence_uevent_work(struct work_struct *work)
{
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
uevent_work);
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
unsigned long flags;
- int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
- if (!list_empty(&fctx->pending)) {
- struct nouveau_fence *fence;
- struct nouveau_channel *chan;
-
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ fence = list_first_entry_or_null(&fctx->pending, typeof(*fence), head);
+ if (fence) {
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(chan, fctx))
- drop = 1;
+ nouveau_fence_update(chan, fctx);
}
- if (drop)
- nvif_event_block(&fctx->event);
-
spin_unlock_irqrestore(&fctx->lock, flags);
}
@@ -246,9 +232,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return -ENODEV;
}
- if (nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
-
+ nouveau_fence_update(chan, fctx);
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
@@ -256,31 +240,44 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return ret;
}
+void
+nouveau_fence_cancel(struct nouveau_fence *fence)
+{
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ if (!dma_fence_is_signaled_locked(&fence->base)) {
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ if (nouveau_fence_signal(fence))
+ nvif_event_block(&fctx->event);
+ }
+ spin_unlock_irqrestore(&fctx->lock, flags);
+}
+
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
- if (fence->base.ops == &nouveau_fence_ops_legacy ||
- fence->base.ops == &nouveau_fence_ops_uevent) {
- struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- struct nouveau_channel *chan;
- unsigned long flags;
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan;
+ unsigned long flags;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return true;
+ if (dma_fence_is_signaled(&fence->base))
+ return true;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (chan)
+ nouveau_fence_update(chan, fctx);
+ spin_unlock_irqrestore(&fctx->lock, flags);
- spin_lock_irqsave(&fctx->lock, flags);
- chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (chan && nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
- spin_unlock_irqrestore(&fctx->lock, flags);
- }
return dma_fence_is_signaled(&fence->base);
}
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
unsigned long t = jiffies, timeout = t + wait;
@@ -460,7 +457,7 @@ static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
return !fctx->dead ? fctx->name : "dead channel";
@@ -474,7 +471,7 @@ static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
*/
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
struct nouveau_channel *chan;
bool ret = false;
@@ -490,7 +487,7 @@ static bool nouveau_fence_is_signaled(struct dma_fence *f)
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
/*
* caller should have a reference on the fence,
@@ -515,7 +512,7 @@ static bool nouveau_fence_no_signaling(struct dma_fence *f)
static void nouveau_fence_release(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
@@ -533,7 +530,7 @@ static const struct dma_fence_ops nouveau_fence_ops_legacy = {
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
- struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence *fence = to_nouveau_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
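The open-coded while (!list_empty()) walks above become list_for_each_entry_safe(), the standard pattern when entries may be unlinked mid-iteration; a generic sketch:

	struct nouveau_fence *fence, *tmp;

	/* tmp caches the next entry, so fence may be deleted in the body */
	list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
		if (dma_fence_is_signaled_locked(&fence->base))
			list_del(&fence->head);
	}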
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 6a983dd9f7b9..9957a919bd38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,12 +17,19 @@ struct nouveau_fence {
unsigned long timeout;
};
+static inline struct nouveau_fence *
+to_nouveau_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct nouveau_fence, base);
+}
+
int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
+void nouveau_fence_cancel(struct nouveau_fence *fence);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
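Moving the container_of() wrapper into the header as to_nouveau_fence() lets other compilation units upcast a generic dma_fence, e.g. the cancel path added to nouveau_sched.c below:

	struct nouveau_fence *fence = to_nouveau_fence(job->done_fence);

	nouveau_fence_cancel(fence);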
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index d326e55d2d24..0cc0bc9f9952 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -11,6 +11,7 @@
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
+#include "nouveau_chan.h"
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
@@ -87,7 +88,8 @@ nouveau_job_init(struct nouveau_job *job,
}
ret = drm_sched_job_init(&job->base, &sched->entity,
- args->credits, NULL);
+ args->credits, NULL,
+ job->file_priv->client_id);
if (ret)
goto err_free_chains;
@@ -120,11 +122,9 @@ nouveau_job_done(struct nouveau_job *job)
{
struct nouveau_sched *sched = job->sched;
- spin_lock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
list_del(&job->entry);
- spin_unlock(&sched->job.list.lock);
-
- wake_up(&sched->job.wq);
+ spin_unlock(&sched->job_list.lock);
}
void
@@ -305,9 +305,9 @@ nouveau_job_submit(struct nouveau_job *job)
}
/* Submit was successful; add the job to the schedulers job list. */
- spin_lock(&sched->job.list.lock);
- list_add(&job->entry, &sched->job.list.head);
- spin_unlock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
+ list_add(&job->entry, &sched->job_list.head);
+ spin_unlock(&sched->job_list.lock);
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
@@ -370,7 +370,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
- enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_RESET;
drm_sched_stop(sched, sched_job);
@@ -392,10 +392,23 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
+static void
+nouveau_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_fence *fence;
+ struct nouveau_job *job;
+
+ job = to_nouveau_job(sched_job);
+ fence = to_nouveau_fence(job->done_fence);
+
+ nouveau_fence_cancel(fence);
+}
+
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
+ .cancel_job = nouveau_sched_cancel_job,
};
static int
@@ -445,9 +458,8 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
goto fail_sched;
mutex_init(&sched->mutex);
- spin_lock_init(&sched->job.list.lock);
- INIT_LIST_HEAD(&sched->job.list.head);
- init_waitqueue_head(&sched->job.wq);
+ spin_lock_init(&sched->job_list.lock);
+ INIT_LIST_HEAD(&sched->job_list.head);
return 0;
@@ -481,16 +493,12 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
-
static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
-
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
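This is why the job waitqueue can go away: with a .cancel_job callback wired up, drm_sched_fini() cancels jobs still pending itself, so the driver no longer has to drain its list before teardown. Generic shape (foo_* callbacks hypothetical):

static const struct drm_sched_backend_ops foo_sched_ops = {
	.run_job	= foo_run_job,
	.timedout_job	= foo_timedout_job,
	.free_job	= foo_free_job,
	.cancel_job	= foo_cancel_job,	/* invoked for unfinished jobs at fini */
};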
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 20cd1da8db73..b98c3f0bef30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -103,12 +103,9 @@ struct nouveau_sched {
struct mutex mutex;
struct {
- struct {
- struct list_head head;
- spinlock_t lock;
- } list;
- struct wait_queue_head wq;
- } job;
+ struct list_head head;
+ spinlock_t lock;
+ } job_list;
};
int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..ddfc46bc1b3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1019,8 +1019,8 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
u64 end = addr + range;
again:
- spin_lock(&sched->job.list.lock);
- list_for_each_entry(__job, &sched->job.list.head, entry) {
+ spin_lock(&sched->job_list.lock);
+ list_for_each_entry(__job, &sched->job_list.head, entry) {
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
list_for_each_op(op, &bind_job->ops) {
@@ -1030,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1038,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
}
static int
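The retry loop above shows the usual shape for sleeping on an entry found under a spinlocked list: pin it with a reference, drop the lock, wait, then rescan from the top since the list may have changed. A hedged sketch with hypothetical helpers:

static void foo_wait_for_overlap(struct nouveau_sched *sched, u64 addr, u64 range)
{
	struct nouveau_job *job;

again:
	spin_lock(&sched->job_list.lock);
	list_for_each_entry(job, &sched->job_list.head, entry) {
		if (foo_overlaps(job, addr, range)) {		/* hypothetical */
			foo_job_get(job);			/* hypothetical refcounting */
			spin_unlock(&sched->job_list.lock);
			wait_for_completion(&job->complete);	/* hypothetical member */
			foo_job_put(job);
			goto again;				/* list may have changed */
		}
	}
	spin_unlock(&sched->job_list.lock);
}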
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 4e09985424b6..e5bbd8563007 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -104,7 +104,7 @@ nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_c
if (cctx) {
refcount_inc(&cctx->refs);
*pcctx = cctx;
- mutex_unlock(&chan->cgrp->mutex);
+ mutex_unlock(&cgrp->mutex);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 7e9e2d3564da..6e63df816d85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -648,7 +648,7 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
if (IS_ERR(ctrl))
- return (void *)ctrl;
+ return ERR_CAST(ctrl);
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
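ERR_CAST() replaces the opaque (void *) cast when propagating an error pointer as a different type; minimal sketch with hypothetical helpers:

static struct bar *bar_new(void)
{
	struct foo *foo = foo_get();		/* hypothetical */

	if (IS_ERR(foo))
		return ERR_CAST(foo);		/* same errno, new pointer type */

	return foo_to_bar(foo);			/* hypothetical */
}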
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 533f70e8a4a6..cf055815077c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -524,7 +524,7 @@ static void dispc_save_context(struct dispc_device *dispc)
DSSDBG("context saved\n");
}
-static void dispc_restore_context(struct dispc_device *dispc)
+static noinline_for_stack void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 6eff97a09160..9f86db774c39 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -562,7 +562,6 @@ static const struct drm_bridge_funcs dpi_bridge_funcs = {
static void dpi_bridge_init(struct dpi_data *dpi)
{
- dpi->bridge.funcs = &dpi_bridge_funcs;
dpi->bridge.of_node = dpi->pdev->dev.of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
@@ -707,9 +706,9 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datalines;
int r;
- dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(&pdev->dev, struct dpi_data, bridge, &dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
ep = of_graph_get_next_port_endpoint(port, NULL);
if (!ep)
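All of the omapdrm output conversions below follow the same devm_drm_bridge_alloc() shape: the helper allocates the driver struct around its embedded drm_bridge, sets bridge.funcs, and ties the lifetime to the device, which is why the manual kzalloc()/kfree() pairs and the funcs assignments in *_bridge_init() disappear. Generic sketch (foo_* names hypothetical):

static const struct drm_bridge_funcs foo_bridge_funcs;

struct foo_out {
	struct drm_bridge bridge;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_out *out;

	out = devm_drm_bridge_alloc(&pdev->dev, struct foo_out, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(out))
		return PTR_ERR(out);

	/* no kfree() needed on any error path from here on */
	return 0;
}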
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 91ee63bfe0bc..b129e5a8d791 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4701,7 +4701,6 @@ static const struct drm_bridge_funcs dsi_bridge_funcs = {
static void dsi_bridge_init(struct dsi_data *dsi)
{
- dsi->bridge.funcs = &dsi_bridge_funcs;
dsi->bridge.of_node = dsi->host.dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
@@ -4894,9 +4893,9 @@ static int dsi_probe(struct platform_device *pdev)
unsigned int i;
int r;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct dsi_data, bridge, &dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a3b22952fdc3..3cd612af2449 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -505,7 +505,6 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi4_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -761,9 +760,9 @@ static int hdmi4_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi4_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -774,25 +773,24 @@ static int hdmi4_probe(struct platform_device *pdev)
r = hdmi4_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -800,7 +798,7 @@ static int hdmi4_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -808,7 +806,7 @@ static int hdmi4_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -827,8 +825,6 @@ err_uninit_output:
hdmi4_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -841,8 +837,6 @@ static void hdmi4_remove(struct platform_device *pdev)
hdmi4_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c98444d39a9..5636b3dfec1c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -480,7 +480,6 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi5_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -727,9 +726,9 @@ static int hdmi5_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi5_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -740,25 +739,24 @@ static int hdmi5_probe(struct platform_device *pdev)
r = hdmi5_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -766,7 +764,7 @@ static int hdmi5_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -774,7 +772,7 @@ static int hdmi5_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -793,8 +791,6 @@ err_uninit_output:
hdmi5_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -807,8 +803,6 @@ static void hdmi5_remove(struct platform_device *pdev)
hdmi5_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index e78826e4b560..df4cbc683e2c 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -284,7 +284,6 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
static void sdi_bridge_init(struct sdi_device *sdi)
{
- sdi->bridge.funcs = &sdi_bridge_funcs;
sdi->bridge.of_node = sdi->pdev->dev.of_node;
sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
@@ -344,21 +343,19 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datapairs;
int r;
- sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
- if (!sdi)
- return -ENOMEM;
+ sdi = devm_drm_bridge_alloc(&pdev->dev, struct sdi_device, bridge, &sdi_bridge_funcs);
+ if (IS_ERR(sdi))
+ return PTR_ERR(sdi);
ep = of_graph_get_next_port_endpoint(port, NULL);
- if (!ep) {
- r = 0;
- goto err_free;
- }
+ if (!ep)
+ return 0;
r = of_property_read_u32(ep, "datapairs", &datapairs);
of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
- goto err_free;
+ return r;
}
sdi->datapairs = datapairs;
@@ -372,19 +369,14 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
r = PTR_ERR(sdi->vdds_sdi_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
- goto err_free;
+ return r;
}
r = sdi_init_output(sdi);
if (r)
- goto err_free;
+ return r;
return 0;
-
-err_free:
- kfree(sdi);
-
- return r;
}
void sdi_uninit_port(struct device_node *port)
@@ -395,5 +387,4 @@ void sdi_uninit_port(struct device_node *port)
return;
sdi_uninit_output(sdi);
- kfree(sdi);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 50349518eda1..9b5d53dc361e 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -664,7 +664,6 @@ static const struct drm_bridge_funcs venc_bridge_funcs = {
static void venc_bridge_init(struct venc_device *venc)
{
- venc->bridge.funcs = &venc_bridge_funcs;
venc->bridge.of_node = venc->pdev->dev.of_node;
venc->bridge.ops = DRM_BRIDGE_OP_MODES;
venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
@@ -809,9 +808,9 @@ static int venc_probe(struct platform_device *pdev)
struct venc_device *venc;
int r;
- venc = kzalloc(sizeof(*venc), GFP_KERNEL);
- if (!venc)
- return -ENOMEM;
+ venc = devm_drm_bridge_alloc(&pdev->dev, struct venc_device, bridge, &venc_bridge_funcs);
+ if (IS_ERR(venc))
+ return PTR_ERR(venc);
venc->pdev = pdev;
@@ -824,26 +823,24 @@ static int venc_probe(struct platform_device *pdev)
venc->config = &venc_config_pal_trm;
venc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(venc->base)) {
- r = PTR_ERR(venc->base);
- goto err_free;
- }
+ if (IS_ERR(venc->base))
+ return PTR_ERR(venc->base);
venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(venc->vdda_dac_reg)) {
r = PTR_ERR(venc->vdda_dac_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
- goto err_free;
+ return r;
}
r = venc_get_clocks(venc);
if (r)
- goto err_free;
+ return r;
r = venc_probe_of(venc);
if (r)
- goto err_free;
+ return r;
pm_runtime_enable(&pdev->dev);
@@ -861,8 +858,6 @@ err_uninit_output:
venc_uninit_output(venc);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(venc);
return r;
}
@@ -875,8 +870,6 @@ static void venc_remove(struct platform_device *pdev)
venc_uninit_output(venc);
pm_runtime_disable(&pdev->dev);
-
- kfree(venc);
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 449d521c78fe..30c81e2e5d6b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -335,10 +335,9 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
#endif
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
unsigned int num_planes = info->num_planes;
struct drm_gem_object *bos[4];
struct drm_framebuffer *fb;
@@ -378,7 +377,8 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- format = drm_get_format_info(dev, mode_cmd);
+ format = drm_get_format_info(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
@@ -440,7 +440,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->dma_addr = 0;
}
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index b75f0b5ef1d8..0873f953cf1d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -20,7 +20,8 @@ struct omap_overlay_info;
struct seq_file;
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct drm_file *file, const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index cfebb08e8a62..09b9f7ff9340 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -193,6 +193,16 @@ config DRM_PANEL_HIMAX_HX83112A
Say Y here if you want to enable support for Himax HX83112A-based
display panels, such as the one found in the Fairphone 4 smartphone.
+config DRM_PANEL_HIMAX_HX83112B
+ tristate "Himax HX83112B-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want to enable support for Himax HX83112B-based
+ display panels, such as the one found in the Fairphone 3 smartphone.
+
config DRM_PANEL_HIMAX_HX8394
tristate "HIMAX HX8394 MIPI-DSI LCD panels"
depends on OF
@@ -647,6 +657,32 @@ config DRM_PANEL_RAYDIUM_RM69380
This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021
in combination with an EDO OLED panel.
+config DRM_PANEL_RENESAS_R61307
+ tristate "Renesas R61307 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the KOE tx13d100vm0eaa
+ IPS-LCD module with the Renesas R61307 IC. The panel has a 1024x768
+ resolution and uses 24-bit RGB per pixel.
+
+ This panel controller can be found in the LG Optimus Vu P895
+ smartphone in combination with an LCD panel.
+
+config DRM_PANEL_RENESAS_R69328
+ tristate "Renesas R69328 720x1280 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the JDI dx12d100vm0eaa
+ IPS-LCD module with the Renesas R69328 IC. The panel has a 720x1280
+ resolution and uses 24-bit RGB per pixel.
+
+ This panel controller can be found in the LG Optimus 4X P880
+ smartphone in combination with an LCD panel.
+
config DRM_PANEL_RONBO_RB070D30
tristate "Ronbo Electronics RB070D30 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 714cbac830e3..957555b49996 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
@@ -65,6 +66,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67200) += panel-raydium-rm67200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R61307) += panel-renesas-r61307.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R69328) += panel-renesas-r69328.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index df746baae301..4a8560b4b899 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -847,9 +847,6 @@ static int panel_add(struct panel_info *pinfo)
"failed to get enable gpio\n");
}
- drm_panel_init(&pinfo->base, dev, &panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&pinfo->base);
if (ret)
return ret;
@@ -865,9 +862,11 @@ static int panel_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int err;
- pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base,
+ &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = desc->mode_flags;
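The panel drivers below are converted the same way with devm_drm_panel_alloc(): allocation, drm_panel_init() and devm lifetime management collapse into one call. Generic sketch (foo_* names hypothetical):

static const struct drm_panel_funcs foo_panel_funcs;

struct foo_panel {
	struct drm_panel panel;
};

static int foo_probe(struct mipi_dsi_device *dsi)
{
	struct foo_panel *ctx;

	ctx = devm_drm_panel_alloc(&dsi->dev, struct foo_panel, panel,
				   &foo_panel_funcs, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	return 0;
}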
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 3e5b0d8636d0..d5fe105bdbdd 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1720,8 +1720,6 @@ static int boe_panel_add(struct boe_panel *boe)
boe->base.prepare_prev_first = true;
- drm_panel_init(&boe->base, dev, &boe_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
if (err < 0) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
@@ -1746,9 +1744,11 @@ static int boe_panel_probe(struct mipi_dsi_device *dsi)
int ret;
const struct panel_desc *desc;
- boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(&dsi->dev, __typeof(*boe), base,
+ &boe_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = desc->lanes;
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 90e8c154a978..9a56e208cbdd 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -113,7 +113,7 @@ struct panel_delay {
* // do fixed enable delay
* // enforce prepare_to_enable min time
*
- * This is not specified in a standard way on eDP timing diagrams.
+ * This is usually (T4+T5+T6+T8)-min on eDP timing diagrams.
* It is effectively the time from HPD going high till you can
* turn on the backlight.
*/
@@ -1869,6 +1869,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x30ed, &delay_200_500_e50, "G156HAN03.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
@@ -1923,6 +1924,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0964, &delay_200_500_e50, "NV133WUM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x096e, &delay_200_500_e50_po2e200, "NV116WHM-T07 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
@@ -1937,6 +1939,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
@@ -1965,6 +1968,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -1973,6 +1977,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
@@ -2005,6 +2010,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x1220, &delay_200_500_e50, "KD116N3730A05"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x0000, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x048d, &delay_200_500_e200_d200, "Unknown"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index b904d5437444..1f177834d629 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -206,9 +206,10 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
struct kd35t133 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct kd35t133, panel,
+ &kd35t133_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -248,9 +249,6 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 986e3e192881..6225501cb174 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -443,9 +443,11 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
unsigned int i;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct k101_im2ba02, panel,
+ &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -463,9 +465,6 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Couldn't get our reset GPIO\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 48e3acaecdf3..4f8d6d8c07e4 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -189,16 +189,14 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
struct feiyang *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct feiyang, panel,
+ &feiyang_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 66abfc44e424..4c432d207634 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -989,8 +989,6 @@ static int hx83102_panel_add(struct hx83102 *ctx)
ctx->base.prepare_prev_first = true;
- drm_panel_init(&ctx->base, dev, &hx83102_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (err < 0)
return dev_err_probe(dev, err, "failed to get orientation\n");
@@ -1013,9 +1011,11 @@ static int hx83102_probe(struct mipi_dsi_device *dsi)
int ret;
const struct hx83102_panel_desc *desc;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base,
+ &hx83102_drm_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = 4;
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
index 47bce087e339..142cb1cc067a 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -269,9 +269,11 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
struct hx83112a_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx83112a_panel, panel,
+ &hx83112a_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd1";
ctx->supplies[1].supply = "vsn";
@@ -295,8 +297,6 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112b.c b/drivers/gpu/drm/panel/panel-himax-hx83112b.c
new file mode 100644
index 000000000000..263f79a967de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112b.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2025 Luca Weiss <luca@lucaweiss.eu>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer specific DSI commands */
+#define HX83112B_SETPOWER1 0xb1
+#define HX83112B_SETDISP 0xb2
+#define HX83112B_SETDRV 0xb4
+#define HX83112B_SETEXTC 0xb9
+#define HX83112B_SETBANK 0xbd
+#define HX83112B_SETDGCLUT 0xc1
+#define HX83112B_SETDISMO 0xc2
+#define HX83112B_UNKNOWN1 0xc6
+#define HX83112B_SETPANEL 0xcc
+#define HX83112B_UNKNOWN2 0xd1
+#define HX83112B_SETPOWER2 0xd2
+#define HX83112B_SETGIP0 0xd3
+#define HX83112B_SETGIP1 0xd5
+#define HX83112B_SETGIP2 0xd6
+#define HX83112B_SETGIP3 0xd8
+#define HX83112B_SETIDLE 0xdd
+#define HX83112B_UNKNOWN3 0xe7
+#define HX83112B_UNKNOWN4 0xe9
+
+struct hx83112b_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data hx83112b_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct hx83112b_panel *to_hx83112b_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx83112b_panel, panel);
+}
+
+static void hx83112b_reset(struct hx83112b_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int hx83112b_on(struct hx83112b_panel *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETEXTC, 0x83, 0x11, 0x2b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0x08, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x04, 0x38, 0x08, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER1,
+ 0xf8, 0x27, 0x27, 0x00, 0x00, 0x0b, 0x0e,
+ 0x0b, 0x0e, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER2, 0x2d, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP,
+ 0x80, 0x02, 0x18, 0x80, 0x70, 0x00, 0x08,
+ 0x1c, 0x08, 0x11, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0xb5, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETIDLE,
+ 0x00, 0x00, 0x08, 0x1c, 0x08, 0x34, 0x34,
+ 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV,
+ 0x65, 0x6b, 0x00, 0x00, 0xd0, 0xd4, 0x36,
+ 0xcf, 0x06, 0xce, 0x00, 0xce, 0x00, 0x00,
+ 0x00, 0x07, 0x00, 0x2a, 0x07, 0x01, 0x07,
+ 0x00, 0x00, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV, 0x01, 0x67, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT,
+ 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef,
+ 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda,
+ 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe,
+ 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85,
+ 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e,
+ 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07,
+ 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25,
+ 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0xc8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPANEL, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0,
+ 0x81, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x13, 0x40, 0x04, 0x09,
+ 0x09, 0x0b, 0x0b, 0x32, 0x10, 0x08, 0x00,
+ 0x08, 0x32, 0x10, 0x08, 0x00, 0x08, 0x32,
+ 0x10, 0x08, 0x00, 0x08, 0x00, 0x00, 0x0a,
+ 0x08, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xef);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP1,
+ 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x18,
+ 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00,
+ 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18,
+ 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35,
+ 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xfc,
+ 0xfc, 0x00, 0x00, 0xfc, 0xfc, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP2,
+ 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x19,
+ 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00,
+ 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18,
+ 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35,
+ 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa,
+ 0xab, 0xaf, 0xef, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xab, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xae, 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3,
+ 0xba, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xba, 0xaa,
+ 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaf, 0xea, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xe4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x17, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x09, 0x09, 0x00, 0x07, 0xe8, 0x00, 0x26,
+ 0x00, 0x07, 0x00, 0x00, 0xe8, 0x32, 0x00,
+ 0xe9, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x12, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x02, 0x00, 0x01, 0x20, 0x01, 0x18, 0x08,
+ 0xa8, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x20, 0x20, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x00, 0xdc, 0x11, 0x70, 0x00, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3,
+ 0x2a, 0xce, 0x02, 0x70, 0x01, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN2, 0x27);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x24);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx83112b_off(struct hx83112b_panel *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx83112b_prepare(struct drm_panel *panel)
+{
+ struct hx83112b_panel *ctx = to_hx83112b_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ hx83112b_reset(ctx);
+
+ ret = hx83112b_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx83112b_unprepare(struct drm_panel *panel)
+{
+ struct hx83112b_panel *ctx = to_hx83112b_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = hx83112b_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode hx83112b_mode = {
+ .clock = (1080 + 40 + 4 + 12) * (2160 + 32 + 2 + 2) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 40,
+ .hsync_end = 1080 + 40 + 4,
+ .htotal = 1080 + 40 + 4 + 12,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 32,
+ .vsync_end = 2160 + 32 + 2,
+ .vtotal = 2160 + 32 + 2 + 2,
+ .width_mm = 65,
+ .height_mm = 128,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int hx83112b_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &hx83112b_mode);
+}
+
+static const struct drm_panel_funcs hx83112b_panel_funcs = {
+ .prepare = hx83112b_prepare,
+ .unprepare = hx83112b_unprepare,
+ .get_modes = hx83112b_get_modes,
+};
+
+static int hx83112b_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops hx83112b_bl_ops = {
+ .update_status = hx83112b_bl_update_status,
+};
+
+static struct backlight_device *
+hx83112b_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &hx83112b_bl_ops, &props);
+}
+
+static int hx83112b_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx83112b_panel *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct hx83112b_panel, panel,
+ &hx83112b_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(hx83112b_supplies),
+ hx83112b_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_LPM;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = hx83112b_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void hx83112b_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx83112b_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx83112b_of_match[] = {
+ { .compatible = "djn,98-03057-6598b-i" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx83112b_of_match);
+
+static struct mipi_dsi_driver hx83112b_driver = {
+ .probe = hx83112b_probe,
+ .remove = hx83112b_remove,
+ .driver = {
+ .name = "panel-himax-hx83112b",
+ .of_match_table = hx83112b_of_match,
+ },
+};
+module_mipi_dsi_driver(hx83112b_driver);
+
+MODULE_DESCRIPTION("DRM driver for hx83112b-equipped DSI panels");
+MODULE_LICENSE("GPL");
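
The backlight path above is worth a note: the panel exposes a 12-bit brightness range (0..4095), so the driver uses the 16-bit "large" DCS helper and temporarily clears MIPI_DSI_MODE_LPM so the update goes out in high-speed mode. As a point of reference, a minimal sketch of what mipi_dsi_dcs_set_display_brightness_large() puts on the wire, assuming the in-kernel behaviour (big-endian payload to DCS command 51h, unlike the legacy little-endian mipi_dsi_dcs_set_display_brightness()):

	static int sketch_set_brightness_large(struct mipi_dsi_device *dsi,
					       u16 brightness)
	{
		/* MIPI_DCS_SET_DISPLAY_BRIGHTNESS is DCS command 51h */
		u8 payload[2] = { brightness >> 8, brightness & 0xff };
		ssize_t err;

		err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
					 payload, sizeof(payload));
		return err < 0 ? err : 0;
	}
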
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index ff994bf0e3cc..c4d3e09a228d 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -477,6 +477,147 @@ static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
.init_sequence = mchp_ac40t08a_init_sequence,
};
+/*
+ * HL055FHAV028C is based on the Himax HX8399, so its datasheet sections
+ * are slightly different from those of HX8394-based panels.
+ */
+static void hl055fhav028c_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
+{
+ /* 6.3.6 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x99);
+
+ /* 6.3.17 SETOFFSET: Set offset voltage (D2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETOFFSET,
+ 0x77);
+
+ /* 6.3.1 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x02, 0x04, 0x74, 0x94, 0x01, 0x32,
+ 0x33, 0x11, 0x11, 0xab, 0x4d, 0x56,
+ 0x73, 0x02, 0x02);
+
+ /* 6.3.2 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x80, 0xae, 0x05, 0x07,
+ 0x5a, 0x11, 0x00, 0x00, 0x10, 0x1e,
+ 0x70, 0x03, 0xd4);
+
+ /* 6.3.3 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x00, 0xff, 0x02, 0xc0, 0x02, 0xc0,
+ 0x00, 0x00, 0x08, 0x00, 0x04, 0x06,
+ 0x00, 0x32, 0x04, 0x0a, 0x08, 0x21,
+ 0x03, 0x01, 0x00, 0x0f, 0xb8, 0x8b,
+ 0x02, 0xc0, 0x02, 0xc0, 0x00, 0x00,
+ 0x08, 0x00, 0x04, 0x06, 0x00, 0x32,
+ 0x04, 0x0a, 0x08, 0x01, 0x00, 0x0f,
+ 0xb8, 0x01);
+
+ /* 6.3.18 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x10, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 0x05, 0x07, 0x00, 0x00,
+ 0x00, 0x05, 0x40);
+
+ /* 6.3.19 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x21, 0x20, 0x01, 0x00, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 6.3.20 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x40, 0x40,
+ 0x20, 0x21, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x00, 0x01, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x40, 0x40,
+ 0x40, 0x40);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xa2, 0xaa, 0x02, 0xa0, 0xa2, 0xa8,
+ 0x02, 0xa0, 0xb0, 0x00, 0x00, 0x00,
+ 0xb0, 0x00, 0x00, 0x00);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xb0, 0x00, 0x00, 0x00, 0xb0, 0x00,
+ 0x00, 0x00, 0xe2, 0xaa, 0x03, 0xf0,
+ 0xe2, 0xaa, 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xe2, 0xaa, 0x03, 0xf0, 0xe2, 0xaa,
+ 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 6.3.4 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7a, 0x7a);
+
+ /* 6.3.26 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x18, 0x27, 0x24, 0x5a, 0x68,
+ 0x79, 0x78, 0x81, 0x8a, 0x92, 0x99,
+ 0x9e, 0xa7, 0xaf, 0xb4, 0xb9, 0xc3,
+ 0xc7, 0xd1, 0xc6, 0xd4, 0xd5, 0x6c,
+ 0x67, 0x71, 0x77, 0x00, 0x00, 0x18,
+ 0x27, 0x24, 0x5a, 0x68, 0x79, 0x78,
+ 0x81, 0x8a, 0x92, 0x99, 0x9e, 0xa7,
+ 0xaf, 0xb4, 0xb9, 0xc3, 0xc7, 0xd1,
+ 0xc6, 0xd4, 0xd5, 0x6c, 0x67, 0x77);
+
+ /* Unknown command, not listed in the HX8399-C datasheet (C6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xff, 0xf9);
+
+ /* 6.3.16 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x08);
+}
+
+static const struct drm_display_mode hl055fhav028c_mode = {
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 8,
+ .htotal = 1080 + 32 + 8 + 32,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 2,
+ .vtotal = 1920 + 16 + 2 + 14,
+ .clock = 134920,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 70,
+ .height_mm = 127,
+};
+
+static const struct hx8394_panel_desc hl055fhav028c_desc = {
+ .mode = &hl055fhav028c_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = hl055fhav028c_init_sequence,
+};
+
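
The repeated D8h writes above only make sense together with the BDh bank selects between them: the HX8399 multiplexes the GIP Option3 payload across register banks 1 and 2, and bank 0 must be selected again before normal commands resume. Should the sequence ever need to be generated rather than written out longhand, a hedged sketch of the pattern (the helper name is made up; the raw-buffer variant expects the opcode as the first byte):

	static void sketch_gip3_bank_write(struct mipi_dsi_multi_context *dsi_ctx,
					   u8 bank, const u8 *d8_seq, size_t len)
	{
		/* Select the bank, then send the bank-specific D8h payload;
		 * restoring bank 0 is left to the caller. d8_seq[0] must be
		 * the D8h opcode, since the buffer helper sends raw bytes. */
		mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, bank);
		mipi_dsi_dcs_write_buffer_multi(dsi_ctx, d8_seq, len);
	}
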
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -611,9 +752,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
struct hx8394 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx8394, panel,
+ &hx8394_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -645,9 +788,6 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
"Failed to request iovcc regulator\n");
- drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -683,6 +823,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "huiling,hl055fhav028c", .data = &hl055fhav028c_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
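
This hx8394 hunk is the first of many mechanical conversions in this series: devm_kzalloc() plus a later drm_panel_init() become a single devm_drm_panel_alloc() call, which allocates the driver structure and initializes the embedded struct drm_panel in one step. That is why every converted probe also drops its drm_panel_init() line. A minimal sketch of the pattern, with placeholder struct and funcs names:

	struct my_panel {
		struct drm_panel panel;		/* must be embedded by value */
		struct gpio_desc *reset_gpio;
	};

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		/* Devres-managed allocation that also runs the equivalent of
		 * drm_panel_init(&ctx->panel, ...) on the named member. */
		ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
					   &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		mipi_dsi_set_drvdata(dsi, ctx);
		return 0;
	}
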
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 94b7dfef3b5e..6ed544a83bdd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -722,9 +722,10 @@ static int ili9322_probe(struct spi_device *spi)
int ret;
int i;
- ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9322, panel,
+ &ili9322_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
spi_set_drvdata(spi, ili);
@@ -883,9 +884,6 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index ff39f5dd4097..f7425dfaa50d 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -173,7 +173,6 @@ struct ili9341_config {
};
struct ili9341 {
- struct device *dev;
const struct ili9341_config *conf;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
@@ -490,9 +489,11 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
struct ili9341 *ili;
int ret;
- ili = devm_kzalloc(dev, sizeof(struct ili9341), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9341, panel,
+ &ili9341_dpi_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
ili->dbi = devm_kzalloc(dev, sizeof(struct mipi_dbi),
GFP_KERNEL);
@@ -526,8 +527,6 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
}
ili->max_spi_speed = ili->conf->max_spi_speed;
- drm_panel_init(&ili->panel, dev, &ili9341_dpi_funcs,
- DRM_MODE_CONNECTOR_DPI);
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
index 1cbc25758bd2..e6c483851f1f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
@@ -307,9 +307,12 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9805 *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9805, panel,
+ &ili9805_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
@@ -320,9 +323,6 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
dsi->lanes = 2;
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9805_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return PTR_ERR(ctx->dvdd);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index a3c79ad99d0b..18aa6222b0c5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -166,9 +166,10 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9806e_panel *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ili9806e_panel, panel, &ili9806e_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = device_get_match_data(dev);
@@ -192,9 +193,6 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->lanes = ctx->desc->lanes;
- drm_panel_init(&ctx->panel, dev, &ili9806e_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (ret)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 28cd7560e5db..ac433345a179 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -43,6 +43,7 @@ struct ili9881c_desc {
const struct drm_display_mode *mode;
const unsigned long mode_flags;
u8 default_address_mode;
+ unsigned int lanes;
};
struct ili9881c {
@@ -1223,6 +1224,199 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
};
+static const struct ili9881c_instr rpi_7inch_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x61),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x61),
+ ILI9881C_COMMAND_INSTR(0x10, 0x61),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x10),
+ ILI9881C_COMMAND_INSTR(0x51, 0x32),
+ ILI9881C_COMMAND_INSTR(0x52, 0x54),
+ ILI9881C_COMMAND_INSTR(0x53, 0x76),
+ ILI9881C_COMMAND_INSTR(0x54, 0x98),
+ ILI9881C_COMMAND_INSTR(0x55, 0xba),
+ ILI9881C_COMMAND_INSTR(0x56, 0x10),
+ ILI9881C_COMMAND_INSTR(0x57, 0x32),
+ ILI9881C_COMMAND_INSTR(0x58, 0x54),
+ ILI9881C_COMMAND_INSTR(0x59, 0x76),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x98),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xba),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xdc),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xfe),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x60, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x06),
+ ILI9881C_COMMAND_INSTR(0x64, 0x07),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x01),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x14),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x76, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x79, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x80, 0x00),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x14),
+ ILI9881C_COMMAND_INSTR(0x83, 0x15),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3B, 0x98),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x14),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x7d),
+ ILI9881C_COMMAND_INSTR(0x55, 0x8f),
+ ILI9881C_COMMAND_INSTR(0x40, 0x33),
+ ILI9881C_COMMAND_INSTR(0x50, 0x96),
+ ILI9881C_COMMAND_INSTR(0x51, 0x96),
+ ILI9881C_COMMAND_INSTR(0x60, 0x23),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+};
+
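
The table above is not sent as one burst: the driver replays it entry by entry from its prepare path, issuing a page-select write whenever it hits an ILI9881C_SWITCH_PAGE_INSTR entry. Condensed from the existing replay loop in this file (declarations trimmed):

	int ret = 0;

	for (i = 0; i < ctx->desc->init_length; i++) {
		const struct ili9881c_instr *instr = &ctx->desc->init[i];

		if (instr->op == ILI9881C_SWITCH_PAGE)
			ret = ili9881c_switch_page(ctx, instr->arg.page);
		else if (instr->op == ILI9881C_COMMAND)
			ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
						     instr->arg.cmd.data);
		if (ret)
			return ret;
	}
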
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1449,6 +1643,23 @@ static const struct drm_display_mode am8001280g_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode rpi_7inch_default_mode = {
+ .clock = 83330,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 239,
+ .hsync_end = 720 + 239 + 33,
+ .htotal = 720 + 239 + 33 + 50,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 2,
+ .vtotal = 1280 + 20 + 2 + 30,
+
+ .width_mm = 90,
+ .height_mm = 151,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1506,16 +1717,15 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9881c *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9881c, panel, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
@@ -1549,7 +1759,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = ctx->desc->mode_flags;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = 4;
+ dsi->lanes = ctx->desc->lanes;
return mipi_dsi_attach(dsi);
}
@@ -1567,6 +1777,7 @@ static const struct ili9881c_desc lhr050h41_desc = {
.init_length = ARRAY_SIZE(lhr050h41_init),
.mode = &lhr050h41_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc k101_im2byl02_desc = {
@@ -1574,6 +1785,7 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.init_length = ARRAY_SIZE(k101_im2byl02_init),
.mode = &k101_im2byl02_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc kd050hdfia020_desc = {
@@ -1599,6 +1811,7 @@ static const struct ili9881c_desc w552946aba_desc = {
.mode = &w552946aba_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
};
static const struct ili9881c_desc am8001280g_desc = {
@@ -1609,6 +1822,14 @@ static const struct ili9881c_desc am8001280g_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static const struct ili9881c_desc rpi_7inch_desc = {
+ .init = rpi_7inch_init,
+ .init_length = ARRAY_SIZE(rpi_7inch_init),
+ .mode = &rpi_7inch_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
+ .lanes = 2,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
@@ -1616,6 +1837,7 @@ static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
+ { .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
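
One consequence of routing dsi->lanes through the desc: every desc in this table must now set .lanes explicitly, or the host would be configured for zero lanes; that is why otherwise untouched descs gain a .lanes = 4 line while the new Raspberry Pi 7" entry is the first two-lane user. A defensive variant of the probe-side assignment (hypothetical guard, not in the patch):

	if (!ctx->desc->lanes)
		return dev_err_probe(&dsi->dev, -EINVAL,
				     "panel desc is missing a lane count\n");
	dsi->lanes = ctx->desc->lanes;
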
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 3c24a63b6be8..85c7059be214 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -614,8 +614,6 @@ static int ili9882t_add(struct ili9882t *ili)
gpiod_set_value(ili->enable_gpio, 0);
- drm_panel_init(&ili->base, dev, &ili9882t_funcs,
- DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &ili->orientation);
if (err < 0) {
dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
@@ -640,9 +638,11 @@ static int ili9882t_probe(struct mipi_dsi_device *dsi)
int ret;
const struct panel_desc *desc;
- ili = devm_kzalloc(&dsi->dev, sizeof(*ili), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(&dsi->dev, __typeof(*ili), base,
+ &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
desc = of_device_get_match_data(&dsi->dev);
dsi->lanes = desc->lanes;
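
A small stylistic note on the allocation above: unlike most conversions in this series, which pass the struct tag, this one names the containing type as __typeof(*ili). Both spellings resolve to the same type, so the two calls below are equivalent (illustrative only):

	ili = devm_drm_panel_alloc(&dsi->dev, struct ili9882t, base,
				   &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI);
	ili = devm_drm_panel_alloc(&dsi->dev, __typeof(*ili), base,
				   &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI);
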
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index f85b7a4cbb42..b2309900873b 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -204,9 +204,11 @@ static int ej030na_probe(struct spi_device *spi)
struct ej030na *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct ej030na, panel,
+ &ej030na_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -231,9 +233,6 @@ static int ej030na_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &ej030na_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d95c0d4f3e35..80afeeab9475 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -382,9 +382,11 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
struct device *dev = &dsi->dev;
int err, i;
- innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
+ innolux = devm_drm_panel_alloc(dev, struct innolux_panel, base,
+ &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(innolux))
+ return PTR_ERR(innolux);
innolux->desc = desc;
@@ -410,9 +412,6 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
innolux->enable_gpio = NULL;
}
- drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&innolux->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index eb0f8373258c..5c2530598ddb 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -1120,9 +1120,10 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
struct jadard *jadard;
int ret;
- jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL);
- if (!jadard)
- return -ENOMEM;
+ jadard = devm_drm_panel_alloc(dev, struct jadard, panel, &jadard_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(jadard))
+ return PTR_ERR(jadard);
desc = of_device_get_match_data(dev);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
@@ -1148,9 +1149,6 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(jadard->vccio);
}
- drm_panel_init(&jadard->panel, dev, &jadard_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 4eb71e85e9e9..cbe354b51bce 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -175,9 +175,11 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
struct jdi_fhd_r63452 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct jdi_fhd_r63452, panel,
+ &jdi_fhd_r63452_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -192,8 +194,6 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
index 5b5082efb282..5f897e143758 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
@@ -435,9 +435,6 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return dev_err_probe(dev, PTR_ERR(jdi->backlight),
"failed to create backlight\n");
- drm_panel_init(&jdi->base, &jdi->link1->dev, &jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&jdi->base);
return 0;
@@ -475,10 +472,13 @@ static int jdi_panel_dsi_probe(struct mipi_dsi_device *dsi)
/* register a panel for only the DSI-LINK1 interface */
if (secondary) {
- jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
- if (!jdi) {
+ jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi),
+ base, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(jdi)) {
put_device(&secondary->dev);
- return -ENOMEM;
+ return PTR_ERR(jdi);
}
mipi_dsi_set_drvdata(dsi, jdi);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index b1ce186de261..3513e5c4dd8c 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -402,9 +402,6 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return dev_err_probe(dev, PTR_ERR(jdi->backlight),
"failed to register backlight %d\n", ret);
- drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&jdi->base);
return 0;
@@ -426,9 +423,11 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
- if (!jdi)
- return -ENOMEM;
+ jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), base,
+ &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(jdi))
+ return PTR_ERR(jdi);
mipi_dsi_set_drvdata(dsi, jdi);
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index 0e5e8e57bd1e..67ca055f06f3 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -821,9 +821,6 @@ static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
"failed to get enable gpio");
- drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
- &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&khadas_ts050->base);
if (err)
return err;
@@ -850,10 +847,12 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
- GFP_KERNEL);
- if (!khadas_ts050)
- return -ENOMEM;
+ khadas_ts050 = devm_drm_panel_alloc(&dsi->dev, __typeof(*khadas_ts050),
+ base, &khadas_ts050_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(khadas_ts050))
+ return PTR_ERR(khadas_ts050);
khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data;
mipi_dsi_set_drvdata(dsi, khadas_ts050);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index d6b912277196..2fc7b0779b37 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -337,9 +337,6 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
kingdisplay->enable_gpio = NULL;
}
- drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
- &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&kingdisplay->base);
if (err)
return err;
@@ -364,9 +361,12 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL);
- if (!kingdisplay)
- return -ENOMEM;
+ kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
+ &kingdisplay_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(kingdisplay))
+ return PTR_ERR(kingdisplay);
mipi_dsi_set_drvdata(dsi, kingdisplay);
kingdisplay->link = dsi;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 77f74e6c467e..0856df5a6ee2 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -548,9 +548,11 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
struct ltk050h3146w *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk050h3146w, panel,
+ &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -577,9 +579,6 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_desc->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 6b18cf00fd4a..7f19fd5b8060 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -604,9 +604,11 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk500hd1829, panel,
+ &ltk500hd1829_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -643,9 +645,6 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 9d0d4faa3f58..b2be6727bf73 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -178,9 +178,10 @@ static int lb035q02_probe(struct spi_device *spi)
struct lb035q02_device *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct lb035q02_device, panel,
+ &lb035q02_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -195,9 +196,6 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index cf246d15b7b6..dec619902c15 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -243,9 +243,11 @@ static int lg4573_probe(struct spi_device *spi)
struct lg4573 *ctx;
int ret;
- ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&spi->dev, struct lg4573, panel,
+ &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->spi = spi;
@@ -258,9 +260,6 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
index f3dcc39670ea..46a56ea92ad9 100644
--- a/drivers/gpu/drm/panel/panel-lg-sw43408.c
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -246,8 +246,6 @@ static int sw43408_add(struct sw43408_panel *ctx)
ctx->base.prepare_prev_first = true;
- drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->base);
return ret;
}
@@ -257,9 +255,11 @@ static int sw43408_probe(struct mipi_dsi_device *dsi)
struct sw43408_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base,
+ &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsi->mode_flags = MIPI_DSI_MODE_LPM;
dsi->format = MIPI_DSI_FMT_RGB888;
diff --git a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
index 032c542aab0f..24b34443ace0 100644
--- a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
+++ b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
@@ -190,9 +190,11 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST);
- lcd = devm_kzalloc(&dsi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lincoln_lcd197_panel, panel,
+ &lincoln_lcd197_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
mipi_dsi_set_drvdata(dsi, lcd);
lcd->dsi = dsi;
@@ -214,9 +216,6 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(lcd->reset_gpio),
"failed to get reset gpio");
- drm_panel_init(&lcd->panel, dev,
- &lincoln_lcd197_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&lcd->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ba6c015aabba..23fd535d8f47 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -164,9 +164,11 @@ static int panel_lvds_probe(struct platform_device *pdev)
struct panel_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_panel_alloc(&pdev->dev, struct panel_lvds, panel,
+ &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = &pdev->dev;
@@ -214,10 +216,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
* driver.
*/
- /* Register the panel. */
- drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
- DRM_MODE_CONNECTOR_LVDS);
-
ret = drm_panel_of_backlight(&lvds->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
index 799c2161fc85..cde168ec631c 100644
--- a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -370,9 +370,11 @@ static int d53e6ea8966_probe(struct spi_device *spi)
.node = NULL,
};
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct d53e6ea8966, panel,
+ &d53e6ea8966_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
spi_set_drvdata(spi, db);
@@ -425,9 +427,6 @@ static int d53e6ea8966_probe(struct spi_device *spi)
db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (db->panel_info->backlight_register) {
ret = db->panel_info->backlight_register(db);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 4db852ffb0f6..55664f5d5aa5 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -234,9 +234,11 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
struct mantix *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct mantix, panel, &mantix_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
@@ -271,9 +273,6 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vddi))
return dev_err_probe(dev, PTR_ERR(ctx->vddi), "Failed to request vddi regulator\n");
- drm_panel_init(&ctx->panel, dev, &mantix_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 81c5c541a351..d5c7210de4af 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -178,9 +178,10 @@ static int nl8048_probe(struct spi_device *spi)
struct nl8048_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct nl8048_panel, panel,
+ &nl8048_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -204,9 +205,6 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index b6429795e8f5..22560384e48e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -361,9 +361,11 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
struct panel_nv3051d *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct panel_nv3051d, panel,
+ &panel_nv3051d_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -391,9 +393,6 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_info->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 06e16a7c14a7..0db9cadd868e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -777,9 +777,10 @@ static int nv3052c_probe(struct spi_device *spi)
struct nv3052c *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct nv3052c, panel, &nv3052c_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->dev = dev;
@@ -803,9 +804,6 @@ static int nv3052c_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- drm_panel_init(&priv->panel, dev, &nv3052c_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return dev_err_probe(dev, err, "Failed to attach backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 549b86f2cc28..3189d89c7ca0 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1087,9 +1087,12 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
struct nt35510 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35510, panel,
+ &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
mipi_dsi_set_drvdata(dsi, nt);
nt->dev = dev;
@@ -1142,9 +1145,6 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(nt->reset_gpio);
}
- drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/*
* First, try to locate an external backlight (such as on GPIO)
* if this fails, assume we will want to use the internal backlight
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 5bbea734123b..98f0782c8411 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -456,9 +456,12 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
struct nt35560 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35560, panel,
+ &nt35560_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
nt->video_mode = of_property_read_bool(dev->of_node,
"enforce-video-mode");
@@ -502,9 +505,6 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
"failed to request GPIO\n");
- drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
nt->panel.backlight = devm_backlight_device_register(dev, "nt35560", dev, nt,
&nt35560_bl_ops, &nt35560_bl_props);
if (IS_ERR(nt->panel.backlight))
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 08b22b592ab0..94aa6489d99f 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -449,9 +449,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, num_dsis = 1, ret;
- nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35950, panel, &nt35950_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
ret = nt35950_sharp_init_vregs(nt, dev);
if (ret)
@@ -491,9 +492,6 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
nt->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, nt);
- drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&nt->panel);
if (ret) {
if (num_dsis == 2)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 116d67bfa114..32cf64c7c18b 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1171,9 +1171,11 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, ret;
- pinfo = devm_kzalloc(dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(dev, struct panel_info, panel,
+ &nt36523_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
pinfo->vddio = devm_regulator_get(dev, "vddio");
if (IS_ERR(pinfo->vddio))
@@ -1211,7 +1213,6 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
pinfo->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, pinfo);
- drm_panel_init(&pinfo->panel, dev, &nt36523_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = of_drm_get_panel_orientation(dev->of_node, &pinfo->orientation);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index c2abd20e0734..29e1f6aea480 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -608,8 +608,6 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
"failed to get reset gpio from DT\n");
- drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&pinfo->base);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
@@ -625,9 +623,11 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
const struct nt36672a_panel_desc *desc;
int err;
- pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base,
+ &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
desc = of_device_get_match_data(&dsi->dev);
dsi->mode_flags = desc->mode_flags;
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index 8c9e04207ba9..c5e00eb55722 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -522,9 +522,11 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx;
int i, ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct nt36672e_panel, panel,
+ &nt36672e_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc) {
@@ -553,8 +555,6 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->mode_flags = ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &nt36672e_drm_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 9fa7654e2b67..a629976bae54 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -246,9 +246,10 @@ static int nt39016_probe(struct spi_device *spi)
struct nt39016 *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct nt39016, drm_panel, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -279,9 +280,6 @@ static int nt39016_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err)
return dev_err_probe(dev, err, "Failed to get backlight handle\n");
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 94ae8c8270b8..66f99982f360 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -175,9 +175,11 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
- lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lcd_olinuxino, panel,
+ &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
i2c_set_clientdata(client, lcd);
lcd->dev = dev;
@@ -234,9 +236,6 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
if (IS_ERR(lcd->enable_gpio))
return PTR_ERR(lcd->enable_gpio);
- drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index fc87f61d4400..3231e84dc66c 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -237,9 +237,11 @@ static int ota5601a_probe(struct spi_device *spi)
struct ota5601a *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct ota5601a, drm_panel,
+ &ota5601a_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -273,9 +275,6 @@ static int ota5601a_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err) {
if (err != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87bbb25d119a..a0f58c3b73f6 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -424,9 +424,11 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
struct otm8009a *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct otm8009a, panel,
+ &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -451,9 +453,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dev, ctx,
&otm8009a_backlight_ops,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index dbea84f51514..2334b77f348c 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -132,9 +132,6 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->supply))
return PTR_ERR(osd101t2587->supply);
- drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
- &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&osd101t2587->base);
if (ret)
return ret;
@@ -161,9 +158,12 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_NO_EOT_PACKET;
- osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL);
- if (!osd101t2587)
- return -ENOMEM;
+ osd101t2587 = devm_drm_panel_alloc(&dsi->dev, __typeof(*osd101t2587), base,
+ &osd101t2587_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(osd101t2587))
+ return PTR_ERR(osd101t2587);
mipi_dsi_set_drvdata(dsi, osd101t2587);
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index d1c5c9bc3c56..3c3308fc55df 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -166,9 +166,6 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
if (IS_ERR(wuxga_nt->supply))
return PTR_ERR(wuxga_nt->supply);
- drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
- &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&wuxga_nt->base);
if (ret)
return ret;
@@ -196,9 +193,12 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
- if (!wuxga_nt)
- return -ENOMEM;
+ wuxga_nt = devm_drm_panel_alloc(&dsi->dev, __typeof(*wuxga_nt), base,
+ &wuxga_nt_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(wuxga_nt))
+ return PTR_ERR(wuxga_nt);
mipi_dsi_set_drvdata(dsi, wuxga_nt);
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index e10e469aa7a6..dc4bb8ad9131 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -373,9 +373,12 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
.node = NULL,
};
- ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
- if (!ts)
- return -ENOMEM;
+ ts = devm_drm_panel_alloc(dev, __typeof(*ts), base,
+ &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (IS_ERR(ts))
+ return PTR_ERR(ts);
i2c_set_clientdata(i2c, ts);
@@ -428,9 +431,6 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
return PTR_ERR(ts->dsi);
}
- drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
*/
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index b2029e035635..2af6aa47a551 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -527,9 +527,11 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
int ret;
u32 video_mode;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct rad_panel, panel,
+ &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
@@ -586,8 +588,6 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
index 64b685dc11f6..333faed62da7 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
@@ -36,12 +36,14 @@ static inline struct raydium_rm67200 *to_raydium_rm67200(struct drm_panel *panel
static void raydium_rm67200_reset(struct raydium_rm67200 *ctx)
{
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
- msleep(60);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- msleep(60);
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
- msleep(60);
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ }
}
static void raydium_rm67200_write(struct mipi_dsi_multi_context *ctx,
@@ -318,6 +320,7 @@ static void w552793baa_setup(struct mipi_dsi_multi_context *ctx)
static int raydium_rm67200_prepare(struct drm_panel *panel)
{
struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
int ret;
ret = regulator_bulk_enable(ctx->num_supplies, ctx->supplies);
@@ -328,6 +331,12 @@ static int raydium_rm67200_prepare(struct drm_panel *panel)
msleep(60);
+ ctx->panel_info->panel_setup(&mctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 30);
+
return 0;
}
@@ -343,20 +352,6 @@ static int raydium_rm67200_unprepare(struct drm_panel *panel)
return 0;
}
-static int raydium_rm67200_enable(struct drm_panel *panel)
-{
- struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
- struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
-
- rm67200->panel_info->panel_setup(&ctx);
- mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
- mipi_dsi_msleep(&ctx, 120);
- mipi_dsi_dcs_set_display_on_multi(&ctx);
- mipi_dsi_msleep(&ctx, 30);
-
- return ctx.accum_err;
-}
-
static int raydium_rm67200_disable(struct drm_panel *panel)
{
struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
@@ -381,7 +376,6 @@ static const struct drm_panel_funcs raydium_rm67200_funcs = {
.prepare = raydium_rm67200_prepare,
.unprepare = raydium_rm67200_unprepare,
.get_modes = raydium_rm67200_get_modes,
- .enable = raydium_rm67200_enable,
.disable = raydium_rm67200_disable,
};
@@ -391,9 +385,11 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
struct raydium_rm67200 *ctx;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct raydium_rm67200, panel,
+ &raydium_rm67200_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_info = device_get_match_data(dev);
if (!ctx->panel_info)
@@ -407,7 +403,7 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset-gpios\n");
@@ -421,9 +417,6 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &raydium_rm67200_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -471,6 +464,7 @@ static const struct raydium_rm67200_panel_info w552793baa_info = {
.vtotal = 1952,
.width_mm = 68, /* 68.04mm */
.height_mm = 121, /* 120.96mm */
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
.type = DRM_MODE_TYPE_DRIVER,
},
.regulators = w552793baa_regulators,
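
In the rm67200 hunks the reset line becomes optional and the reset cycle is guarded. gpiod_set_value_cansleep() already ignores a NULL descriptor, so the new guard is not about safety: it skips three pointless 60 ms sleeps on boards whose devicetree omits reset-gpios. A small sketch of the idiom, with an illustrative pulse sequence rather than the driver's exact one:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static void example_reset_pulse(struct gpio_desc *reset)
{
	if (!reset)	/* devm_gpiod_get_optional() returned NULL: no line */
		return;

	gpiod_set_value_cansleep(reset, 1);	/* assert reset */
	msleep(60);
	gpiod_set_value_cansleep(reset, 0);	/* release it */
	msleep(60);
}
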
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 7b7fe987e292..669b5f5c1ad9 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -327,9 +327,11 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
struct rm68200 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm68200, panel,
+ &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -355,9 +357,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
index ea1b728e85a2..8e9484768657 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
@@ -281,9 +281,11 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
struct rm692e5_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm692e5_panel, panel,
+ &rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "dvdd";
@@ -306,8 +308,6 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
index d3071c01aaea..86769cadec97 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -208,9 +208,11 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
struct device_node *dsi_sec;
int ret, i;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm69380_panel, panel,
+ &rm69380_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "avdd";
@@ -248,8 +250,6 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
ctx->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, ctx);
- drm_panel_init(&ctx->panel, dev, &rm69380_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm69380_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-renesas-r61307.c b/drivers/gpu/drm/panel/panel-renesas-r61307.c
new file mode 100644
index 000000000000..319415194839
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r61307.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R61307_MACP 0xb0 /* Manufacturer CMD Protect */
+#define R61307_MACP_ON 0x03
+#define R61307_MACP_OFF 0x04
+
+#define R61307_INVERSION 0xc1
+#define R61307_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R61307_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R61307_GAMMA_SET_C 0xca /* Gamma Setting C */
+#define R61307_CONTRAST_SET 0xcc
+
+struct renesas_r61307 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vcc_supply;
+ struct regulator *iovcc_supply;
+
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+
+ bool dig_cont_adj;
+ bool inversion;
+ u32 gamma;
+};
+
+static const u8 gamma_setting[][25] = {
+ { /* sentinel */ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x0b, 0x0d, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06,
+ 0x0a, 0x0c, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06
+ },
+};
+
+static inline struct renesas_r61307 *to_renesas_r61307(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r61307, panel);
+}
+
+static void renesas_r61307_reset(struct renesas_r61307 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r61307_prepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ ret = regulator_enable(priv->iovcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable iovcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ renesas_r61307_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r61307_enable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 80);
+
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_OFF);
+
+ if (priv->dig_cont_adj)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_CONTRAST_SET,
+ 0xdc, 0xb4, 0xff);
+
+ if (priv->gamma)
+ mipi_dsi_generic_write_multi(&ctx, gamma_setting[priv->gamma],
+ sizeof(gamma_setting[priv->gamma]));
+
+ if (priv->inversion)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x50, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x11);
+ else
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x10, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x01);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return 0;
+}
+
+static int renesas_r61307_disable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 100);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return 0;
+}
+
+static int renesas_r61307_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->iovcc_supply);
+ usleep_range(2000, 3000);
+ regulator_disable(priv->vcc_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r61307_mode = {
+ .clock = (768 + 116 + 81 + 5) * (1024 + 24 + 8 + 2) * 60 / 1000,
+ .hdisplay = 768,
+ .hsync_start = 768 + 116,
+ .hsync_end = 768 + 116 + 81,
+ .htotal = 768 + 116 + 81 + 5,
+ .vdisplay = 1024,
+ .vsync_start = 1024 + 24,
+ .vsync_end = 1024 + 24 + 8,
+ .vtotal = 1024 + 24 + 8 + 2,
+ .width_mm = 76,
+ .height_mm = 101,
+};
+
+static int renesas_r61307_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r61307_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r61307_panel_funcs = {
+ .prepare = renesas_r61307_prepare,
+ .enable = renesas_r61307_enable,
+ .disable = renesas_r61307_disable,
+ .unprepare = renesas_r61307_unprepare,
+ .get_modes = renesas_r61307_get_modes,
+};
+
+static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r61307 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r61307, panel,
+ &renesas_r61307_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vcc_supply = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(priv->vcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vcc_supply),
+ "Failed to get vcc-supply\n");
+
+ priv->iovcc_supply = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(priv->iovcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->iovcc_supply),
+ "Failed to get iovcc-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset gpios\n");
+
+ if (device_property_read_bool(dev, "renesas,inversion"))
+ priv->inversion = true;
+
+ if (device_property_read_bool(dev, "renesas,contrast"))
+ priv->dig_cont_adj = true;
+
+ priv->gamma = 0;
+ device_property_read_u32(dev, "renesas,gamma", &priv->gamma);
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r61307_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r61307 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r61307_of_match[] = {
+ { .compatible = "hit,tx13d100vm0eaa" },
+ { .compatible = "koe,tx13d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r61307_of_match);
+
+static struct mipi_dsi_driver renesas_r61307_driver = {
+ .probe = renesas_r61307_probe,
+ .remove = renesas_r61307_remove,
+ .driver = {
+ .name = "panel-renesas-r61307",
+ .of_match_table = renesas_r61307_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r61307_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R61307-based panel driver");
+MODULE_LICENSE("GPL");
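
This new driver, the r69328 one that follows, and the rm67200 prepare() change above all lean on the mipi_dsi_multi_context pattern: the context records the first error in accum_err and every subsequent *_multi() call turns into a no-op, so a long command sequence needs a single error check at the end instead of one per write. (In r61307, the renesas,gamma property selects a row of gamma_setting[], with row 0 a sentinel meaning the panel defaults are kept.) A minimal sketch of the pattern, using only calls that appear in the new files:

#include <drm/drm_mipi_dsi.h>

static int example_init_sequence(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);	/* skipped if an error is already pending */
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;	/* 0 on success, first failure otherwise */
}
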
diff --git a/drivers/gpu/drm/panel/panel-renesas-r69328.c b/drivers/gpu/drm/panel/panel-renesas-r69328.c
new file mode 100644
index 000000000000..46287ab04c30
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r69328.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R69328_MACP 0xb0 /* Manufacturer Access CMD Protect */
+#define R69328_MACP_ON 0x03
+#define R69328_MACP_OFF 0x04
+
+#define R69328_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R69328_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R69328_GAMMA_SET_C 0xca /* Gamma Setting C */
+
+#define R69328_POWER_SET 0xd1
+
+struct renesas_r69328 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vdd_supply;
+ struct regulator *vddio_supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct renesas_r69328 *to_renesas_r69328(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r69328, panel);
+}
+
+static void renesas_r69328_reset(struct renesas_r69328 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r69328_prepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vdd_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vdd power supply\n");
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ ret = regulator_enable(priv->vddio_supply);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable vddio power supply\n");
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ renesas_r69328_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r69328_enable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ /* Set address mode */
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+
+ mipi_dsi_msleep(&ctx, 100);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_OFF);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_POWER_SET, 0x14, 0x1d,
+ 0x21, 0x67, 0x11, 0x9a);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_A, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_B, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_C, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return 0;
+}
+
+static int renesas_r69328_disable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 60);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return 0;
+}
+
+static int renesas_r69328_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->vddio_supply);
+ regulator_disable(priv->vdd_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r69328_mode = {
+ .clock = (720 + 92 + 62 + 4) * (1280 + 6 + 3 + 1) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 92,
+ .hsync_end = 720 + 92 + 62,
+ .htotal = 720 + 92 + 62 + 4,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 3,
+ .vtotal = 1280 + 6 + 3 + 1,
+ .width_mm = 59,
+ .height_mm = 105,
+};
+
+static int renesas_r69328_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r69328_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r69328_panel_funcs = {
+ .prepare = renesas_r69328_prepare,
+ .enable = renesas_r69328_enable,
+ .disable = renesas_r69328_disable,
+ .unprepare = renesas_r69328_unprepare,
+ .get_modes = renesas_r69328_get_modes,
+};
+
+static int renesas_r69328_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r69328 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r69328, panel,
+ &renesas_r69328_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(priv->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vdd_supply),
+ "Failed to get vdd-supply\n");
+
+ priv->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(priv->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vddio_supply),
+ "Failed to get vddio-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r69328_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r69328 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r69328_of_match[] = {
+ { .compatible = "jdi,dx12d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r69328_of_match);
+
+static struct mipi_dsi_driver renesas_r69328_driver = {
+ .probe = renesas_r69328_probe,
+ .remove = renesas_r69328_remove,
+ .driver = {
+ .name = "panel-renesas-r69328",
+ .of_match_table = renesas_r69328_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r69328_driver);
+
+MODULE_AUTHOR("Maxim Schwalm <maxim.schwalm@gmail.com>");
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R69328-based panel driver");
+MODULE_LICENSE("GPL");
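
Both new panels also share the same prepare()/unprepare() bookkeeping: a prepared flag makes the hooks idempotent, supplies come up before reset is released, and teardown runs in reverse order. A stripped-down skeleton of that shape follows; the structure, names, and delays are illustrative, not a drop-in implementation:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

struct example_ctx {
	struct regulator *vdd;
	struct gpio_desc *reset;
	bool prepared;
};

static int example_prepare(struct example_ctx *p)
{
	int ret;

	if (p->prepared)
		return 0;	/* already powered: nothing to do */

	ret = regulator_enable(p->vdd);
	if (ret)
		return ret;

	usleep_range(10000, 11000);		/* rail settle time (assumed) */
	gpiod_set_value_cansleep(p->reset, 0);	/* release reset */

	p->prepared = true;
	return 0;
}

static int example_unprepare(struct example_ctx *p)
{
	if (!p->prepared)
		return 0;

	gpiod_set_value_cansleep(p->reset, 1);	/* back into reset */
	regulator_disable(p->vdd);

	p->prepared = false;
	return 0;
}
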
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 2ef5ea5eaeeb..ad35d0fb0a16 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -143,9 +143,11 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
struct rb070d30_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct rb070d30_panel, panel,
+ &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
if (IS_ERR(ctx->supply))
@@ -154,9 +156,6 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
index cf6186312252..188dd7cf0297 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
@@ -211,9 +211,11 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
struct ams581vf01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ams581vf01, panel,
+ &ams581vf01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams581vf01_supplies),
@@ -235,8 +237,6 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams581vf01_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
index 817365cb5e46..f8ebbd4a530b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
@@ -257,9 +257,11 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
struct ams639rq08 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ams639rq08, panel,
+ &ams639rq08_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams639rq08_supplies),
@@ -281,8 +283,6 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams639rq08_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 9a482a744b8c..20ec27d2d6c2 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -266,9 +266,12 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
struct device *dev = &aux_ep->dev;
int ret;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct atana33xc20_panel, base,
+ &atana33xc20_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
dev_set_drvdata(dev, panel);
panel->aux = aux_ep->aux;
@@ -301,8 +304,6 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
if (ret)
return ret;
- drm_panel_init(&panel->base, dev, &atana33xc20_funcs, DRM_MODE_CONNECTOR_eDP);
-
pm_runtime_get_sync(dev);
ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index 14c6700e37b3..a97182f3c990 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -267,9 +267,11 @@ static int db7430_probe(struct spi_device *spi)
struct db7430 *db;
int ret;
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct db7430, panel, &db7430_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
+
db->dev = dev;
/*
@@ -294,9 +296,6 @@ static int db7430_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
- drm_panel_init(&db->panel, dev, &db7430_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
/* FIXME: if no external backlight, use internal backlight */
ret = drm_panel_of_backlight(&db->panel);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9f438683a6f6..c7f2241523a0 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -339,9 +339,11 @@ static int ld9040_probe(struct spi_device *spi)
struct ld9040 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ld9040, panel,
+ &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
@@ -373,9 +375,6 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
ctx, &ld9040_bl_ops,
&ld9040_bl_props);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 79f611963c61..ba1a02000bb9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -166,9 +166,11 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
struct s6d16d0 *s6;
int ret;
- s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
- if (!s6)
- return -ENOMEM;
+ s6 = devm_drm_panel_alloc(dev, struct s6d16d0, panel,
+ &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s6))
+ return PTR_ERR(s6);
mipi_dsi_set_drvdata(dsi, s6);
s6->dev = dev;
@@ -200,9 +202,6 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&s6->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
index 2adb223a895c..300dc19bd9d1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -247,9 +247,11 @@ static int s6d27a1_probe(struct spi_device *spi)
struct s6d27a1 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d27a1, panel,
+ &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -277,9 +279,6 @@ static int s6d27a1_probe(struct spi_device *spi)
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
- drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "failed to add backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index 93f11e2e9398..692020081524 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -244,7 +244,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
.init_func = s6d7aa0_lsl080al02_init,
.off_func = s6d7aa0_lsl080al02_off,
.drm_mode = &s6d7aa0_lsl080al02_mode,
- .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP,
.bus_flags = 0,
.has_backlight = false,
@@ -392,9 +392,11 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
struct s6d7aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d7aa0, panel,
+ &s6d7aa0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc)
@@ -420,8 +422,6 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
| ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &s6d7aa0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
index 27a059b55ae5..f4d75eca3cdf 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -185,9 +185,11 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
struct s6e3fa7_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3fa7_panel, panel,
+ &s6e3fa7_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -202,8 +204,6 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index ab8b58545284..1db0c63b1131 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -681,9 +681,11 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
struct s6e3ha2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3ha2, panel,
+ &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -731,8 +733,6 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
- drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
index 64c6f7d45bed..550e9ef9bb71 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
@@ -253,9 +253,11 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
struct s6e3ha8 *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct s6e3ha8, panel,
+ &s6e3ha8_amb577px01_wqhd_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies),
s6e3ha8_supplies,
@@ -279,8 +281,6 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
priv->panel.prepare_prev_first = true;
drm_panel_add(&priv->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 364f1c9a16d9..6f3d39556f92 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -437,9 +437,11 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
struct s6e63j0x03 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e63j0x03, panel,
+ &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -462,8 +464,6 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"cannot get reset-gpio\n");
- drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6917ffda5b2b..ea241c89593b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -13,6 +13,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e92e95158d1f..e91f50662997 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -687,9 +687,11 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams427ap24 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams427ap24, panel,
+ &s6e88a0_ams427ap24_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(dev,
ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
@@ -711,8 +713,6 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 57b1a899bbdc..ca5cad41ff1d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -165,9 +165,11 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams452ef01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams452ef01, panel,
+ &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
@@ -192,9 +194,6 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index c51d07ec1529..1b5c500d4f4e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -979,9 +979,11 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
struct s6e8aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa0, panel,
+ &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -990,7 +992,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
- | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+ | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
ret = s6e8aa0_parse_dt(ctx);
if (ret < 0)
@@ -1014,8 +1016,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index d92ae6b6100f..064258217d50 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -191,9 +191,11 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
struct sofef00_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sofef00_panel, panel,
+ &sofef00_panel_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
@@ -211,9 +213,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- drm_panel_init(&ctx->panel, dev, &sofef00_panel_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = sofef00_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 7d1b421ea9dd..0935d83ee2db 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -204,9 +204,11 @@ static int seiko_panel_probe(struct device *dev,
struct seiko_panel *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct seiko_panel, base,
+ &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -224,9 +226,6 @@ static int seiko_panel_probe(struct device *dev,
return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
"failed to request GPIO\n");
- drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index a0d76d588da1..d159b0e4fdb6 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -279,9 +279,6 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->supply))
return PTR_ERR(sharp->supply);
- drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&sharp->base);
if (ret)
return ret;
@@ -323,10 +320,12 @@ static int sharp_panel_probe(struct mipi_dsi_device *dsi)
/* register a panel for only the DSI-LINK1 interface */
if (secondary) {
- sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
- if (!sharp) {
+ sharp = devm_drm_panel_alloc(&dsi->dev, __typeof(*sharp), base,
+ &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(sharp)) {
put_device(&secondary->dev);
- return -ENOMEM;
+ return PTR_ERR(sharp);
}
mipi_dsi_set_drvdata(dsi, sharp);
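
The sharp_panel_probe() hunk is the one conversion with extra cleanup: probe already holds a reference on the secondary DSI-LINK device at this point, so a failed panel allocation must drop that reference before returning. A hypothetical helper showing the shape of that error path; only put_device() and the IS_ERR()/PTR_ERR() handling come from the hunk itself:

#include <linux/device.h>
#include <linux/err.h>

static long example_alloc_failed(struct device *peer, void *panel_or_err)
{
	if (!IS_ERR(panel_or_err))
		return 0;

	put_device(peer);	/* balance the reference taken earlier in probe */
	return PTR_ERR(panel_or_err);
}
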
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index a9673a52b861..938beac4655d 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -138,9 +138,10 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
{
struct ls037v7dw01_panel *lcd;
- lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&pdev->dev, struct ls037v7dw01_panel, panel,
+ &ls037v7dw01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
platform_set_drvdata(pdev, lcd);
lcd->pdev = pdev;
@@ -181,9 +182,6 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 0b4e0983639b..0456f3d705e7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -193,9 +193,11 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
struct sharp_ls060 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sharp_ls060, panel,
+ &sharp_ls060_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->vddi_supply = devm_regulator_get(dev, "vddi");
if (IS_ERR(ctx->vddi_supply))
@@ -227,9 +229,6 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9f81fa960b46..3333d4a07504 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1305,6 +1305,30 @@ static const struct panel_desc auo_g190ean01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_p238han01_timings = {
+ .pixelclock = { 107400000, 142400000, 180000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 30, 70, 650 },
+ .hback_porch = { 30, 70, 650 },
+ .hsync_len = { 20, 40, 136 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 5, 19, 318 },
+ .vback_porch = { 5, 19, 318 },
+ .vsync_len = { 4, 12, 120 },
+};
+
+static const struct panel_desc auo_p238han01 = {
+ .timings = &auo_p238han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 527,
+ .height = 296,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
@@ -4976,6 +5000,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g190ean01",
.data = &auo_g190ean01,
}, {
+ .compatible = "auo,p238han01",
+ .data = &auo_p238han01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
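
A quick worked check on the new auo_p238han01 entry: each display_timing field is a { min, typ, max } triple, and the typical column should reproduce the panel's nominal refresh rate. Plugging in the values from the hunk:

/* Values copied from auo_p238han01_timings (typical column). */
static const unsigned long typ_pixclk = 142400000;		/* Hz */
static const unsigned long typ_htotal = 1920 + 70 + 70 + 40;	/* 2100 */
static const unsigned long typ_vtotal = 1080 + 19 + 19 + 12;	/* 1130 */
/* 142400000 / (2100 * 1130) = ~60.0 Hz, the expected refresh rate */
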
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 1f72ef7ca74c..2f79ec4a2063 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -520,6 +520,28 @@ static void rg28xx_gip_sequence(struct st7701 *st7701)
st7701_switch_cmd_bkx(st7701, false, 0);
}
+static void wf40eswaa6mnn0_gip_sequence(struct st7701 *st7701)
+{
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x28, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x08, 0xA0, 0x00, 0x00, 0x07, 0xA0, 0x00,
+ 0x00, 0x00, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE2, 0x11, 0x11, 0x44, 0x44, 0xED, 0xA0, 0x00,
+ 0x00, 0xEC, 0xA0, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0A, 0xE9, 0xD8, 0xA0, 0x0C, 0xEB, 0xD8,
+ 0xA0, 0x0E, 0xED, 0xD8, 0xA0, 0x10, 0xEF, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x09, 0xE8, 0xD8, 0xA0, 0x0B, 0xEA, 0xD8,
+ 0xA0, 0x0D, 0xEC, 0xD8, 0xA0, 0x0F, 0xEE, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xEB, 0x00, 0x00, 0xE4, 0xE4, 0x88, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xEC, 0x3C, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x02, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x45, 0x67, 0x98, 0xBA);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -1135,6 +1157,107 @@ static const struct st7701_panel_desc rg28xx_desc = {
.gip_sequence = rg28xx_gip_sequence,
};
+static const struct drm_display_mode wf40eswaa6mnn0_mode = {
+ .clock = 18306,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 45,
+ .htotal = 480 + 2 + 45 + 13,
+
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 70,
+ .vtotal = 480 + 2 + 70 + 13,
+
+ .width_mm = 72,
+ .height_mm = 70,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc wf40eswaa6mnn0_desc = {
+ .mode = &wf40eswaa6mnn0_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 0,
+
+ .pv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0x1),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x04),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x14),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xb3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3a),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x19),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0f),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x02),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x0f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xa3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x0d)
+ },
+ .nlinv = 3,
+ .vop_uv = 4737500,
+ .vcom_uv = 662500,
+ .vgh_mv = 15000,
+ .vgl_mv = -10170,
+ .avdd_mv = 6600,
+ .avcl_mv = -4600,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIDDLE,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = wf40eswaa6mnn0_gip_sequence,
+};
+
static void st7701_cleanup(void *data)
{
struct st7701 *st7701 = (struct st7701 *)data;
@@ -1150,9 +1273,10 @@ static int st7701_probe(struct device *dev, int connector_type)
struct st7701 *st7701;
int ret;
- st7701 = devm_kzalloc(dev, sizeof(*st7701), GFP_KERNEL);
- if (!st7701)
- return -ENOMEM;
+ st7701 = devm_drm_panel_alloc(dev, struct st7701, panel, &st7701_funcs,
+ connector_type);
+ if (IS_ERR(st7701))
+ return PTR_ERR(st7701);
desc = of_device_get_match_data(dev);
if (!desc)
@@ -1176,7 +1300,6 @@ static int st7701_probe(struct device *dev, int connector_type)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
- drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
st7701->panel.prepare_prev_first = true;
/**
@@ -1265,6 +1388,7 @@ static const struct of_device_id st7701_dsi_of_match[] = {
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
+ { .compatible = "winstar,wf40eswaa6mnn0", .data = &wf40eswaa6mnn0_desc },
{ }
};
MODULE_DEVICE_TABLE(of, st7701_dsi_of_match);
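
The same arithmetic validates the new wf40eswaa6mnn0 mode: drm_display_mode.clock is expressed in kHz, so for a 60 Hz panel it should equal htotal * vtotal * 60 / 1000. With the totals from the hunk:

static const unsigned int htotal = 480 + 2 + 45 + 13;	/* 540 */
static const unsigned int vtotal = 480 + 2 + 70 + 13;	/* 565 */
/* 540 * 565 * 60 / 1000 = 18306, matching .clock = 18306 above */
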
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 67e8e45498cb..1a007a244d84 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -846,9 +846,11 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
struct st7703 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7703, panel,
+ &st7703_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -876,9 +878,6 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
- drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 28bfc48a9127..04d91929eedd 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -612,9 +612,10 @@ static int st7789v_probe(struct spi_device *spi)
struct st7789v *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7789v, panel,
+ &st7789v_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
@@ -626,9 +627,6 @@ static int st7789v_probe(struct spi_device *spi)
ctx->info = device_get_match_data(&spi->dev);
- drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ctx->power = devm_regulator_get(dev, "power");
ret = PTR_ERR_OR_ZERO(ctx->power);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index d437f5c84f5f..fe043de791b0 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -607,9 +607,10 @@ static int acx565akm_probe(struct spi_device *spi)
struct acx565akm_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct acx565akm_panel, panel,
+ &acx565akm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
spi->mode = SPI_MODE_3;
@@ -635,9 +636,6 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 97f4bb4e1029..7c989b70ab51 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -175,9 +175,11 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
struct sony_td4353_jdi *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sony_td4353_jdi, panel,
+ &sony_td4353_jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->type = (uintptr_t)of_device_get_match_data(dev);
@@ -206,9 +208,6 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sony_td4353_jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index 104b2290560e..216a6ad8696e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -433,9 +433,11 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
struct truly_nt35521 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct truly_nt35521, panel,
+ &truly_nt35521_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "positive5";
ctx->supplies[1].supply = "negative5";
@@ -465,9 +467,6 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = truly_nt35521_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index e780faee1857..4854437e2899 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -68,9 +68,11 @@ static int summit_probe(struct mipi_dsi_device *dsi)
struct summit_data *s_data;
int ret;
- s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
- if (!s_data)
- return -ENOMEM;
+ s_data = devm_drm_panel_alloc(dev, struct summit_data, panel,
+ &summit_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s_data))
+ return PTR_ERR(s_data);
mipi_dsi_set_drvdata(dsi, s_data);
s_data->dsi = dsi;
@@ -85,8 +87,6 @@ static int summit_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(s_data->bl))
return PTR_ERR(s_data->bl);
- drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&s_data->panel);
return mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index b148e6cba9bd..3a74d48753d9 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -229,9 +229,11 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
struct r63353_panel *panel;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct r63353_panel, base,
+ &r63353_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
panel->dsi = dsi;
@@ -258,9 +260,6 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(panel->reset_gpio);
}
- drm_panel_init(&panel->base, dev, &r63353_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
panel->base.prepare_prev_first = true;
ret = drm_panel_of_backlight(&panel->base);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 11d460d2ea19..ee86ff20c1bd 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -318,9 +318,11 @@ static int td028ttec1_probe(struct spi_device *spi)
struct td028ttec1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td028ttec1_panel, panel,
+ &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -334,9 +336,6 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index cf4609bb9b1d..b18af526b54c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -421,9 +421,10 @@ static int td043mtea1_probe(struct spi_device *spi)
struct td043mtea1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (lcd == NULL)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td043mtea1_panel, panel,
+ &td043mtea1_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -455,9 +456,6 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index f6a212e542cb..0beba5c08956 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -405,9 +405,11 @@ static int tpg110_probe(struct spi_device *spi)
struct tpg110 *tpg;
int ret;
- tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL);
- if (!tpg)
- return -ENOMEM;
+ tpg = devm_drm_panel_alloc(dev, struct tpg110, panel,
+ &tpg110_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(tpg))
+ return PTR_ERR(tpg);
+
tpg->dev = dev;
/* We get the physical display dimensions from the DT */
@@ -438,9 +440,6 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&tpg->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 3ea0a86f6e69..690cccedd438 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -255,9 +255,11 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
struct drm_dsc_config *dsc;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_r66451, panel,
+ &visionox_r66451_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
if (!dsc)
@@ -297,7 +299,6 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index be3a9797fbce..909c280eab1f 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -15,11 +16,138 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct visionox_rm69299_panel_desc {
+ const struct drm_display_mode *mode;
+ const u8 *init_seq;
+ unsigned int init_seq_len;
+};
+
struct visionox_rm69299 {
struct drm_panel panel;
- struct regulator_bulk_data supplies[2];
+ struct regulator_bulk_data *supplies;
struct gpio_desc *reset_gpio;
struct mipi_dsi_device *dsi;
+ const struct visionox_rm69299_panel_desc *desc;
+};
+
+static const struct regulator_bulk_data visionox_rm69299_supplies[] = {
+ { .supply = "vdda", .init_load_uA = 32000 },
+ { .supply = "vdd3p3", .init_load_uA = 13200 },
+};
+
+static const u8 visionox_rm69299_1080x2248_60hz_init_seq[][2] = {
+ { 0xfe, 0x00 }, { 0xc2, 0x08 }, { 0x35, 0x00 }, { 0x51, 0xff },
+};
+
+static const u8 visionox_rm69299_1080x2160_60hz_init_seq[][2] = {
+ { 0xfe, 0x40 }, { 0x05, 0x04 }, { 0x06, 0x08 }, { 0x08, 0x04 },
+ { 0x09, 0x08 }, { 0x0a, 0x07 }, { 0x0b, 0xcc }, { 0x0c, 0x07 },
+ { 0x0d, 0x90 }, { 0x0f, 0x87 }, { 0x20, 0x8d }, { 0x21, 0x8d },
+ { 0x24, 0x05 }, { 0x26, 0x05 }, { 0x28, 0x05 }, { 0x2a, 0x05 },
+ { 0x2d, 0x28 }, { 0x2f, 0x28 }, { 0x30, 0x32 }, { 0x31, 0x32 },
+ { 0x37, 0x80 }, { 0x38, 0x30 }, { 0x39, 0xa8 }, { 0x46, 0x48 },
+ { 0x47, 0x48 }, { 0x6b, 0x10 }, { 0x6f, 0x02 }, { 0x74, 0x2b },
+ { 0x80, 0x1a }, { 0xfe, 0x40 }, { 0x93, 0x10 }, { 0x16, 0x00 },
+ { 0x85, 0x07 }, { 0x84, 0x01 }, { 0x86, 0x0f }, { 0x87, 0x05 },
+ { 0x8c, 0x00 }, { 0x88, 0x2e }, { 0x89, 0x2e }, { 0x8b, 0x09 },
+ { 0x95, 0x00 }, { 0x91, 0x00 }, { 0x90, 0x00 }, { 0x8d, 0xd0 },
+ { 0x8a, 0x03 }, { 0xfe, 0xa0 }, { 0x13, 0x00 }, { 0x33, 0x00 },
+ { 0x0b, 0x33 }, { 0x36, 0x1e }, { 0x31, 0x88 }, { 0x32, 0x88 },
+ { 0x37, 0xf1 }, { 0xfe, 0x50 }, { 0x00, 0x00 }, { 0x01, 0x00 },
+ { 0x02, 0x00 }, { 0x03, 0xe9 }, { 0x04, 0x00 }, { 0x05, 0xf6 },
+ { 0x06, 0x01 }, { 0x07, 0x2c }, { 0x08, 0x01 }, { 0x09, 0x62 },
+ { 0x0a, 0x01 }, { 0x0b, 0x98 }, { 0x0c, 0x01 }, { 0x0d, 0xbf },
+ { 0x0e, 0x01 }, { 0x0f, 0xf6 }, { 0x10, 0x02 }, { 0x11, 0x24 },
+ { 0x12, 0x02 }, { 0x13, 0x4e }, { 0x14, 0x02 }, { 0x15, 0x70 },
+ { 0x16, 0x02 }, { 0x17, 0xaf }, { 0x18, 0x02 }, { 0x19, 0xe2 },
+ { 0x1a, 0x03 }, { 0x1b, 0x1f }, { 0x1c, 0x03 }, { 0x1d, 0x52 },
+ { 0x1e, 0x03 }, { 0x1f, 0x82 }, { 0x20, 0x03 }, { 0x21, 0xb6 },
+ { 0x22, 0x03 }, { 0x23, 0xf0 }, { 0x24, 0x04 }, { 0x25, 0x1f },
+ { 0x26, 0x04 }, { 0x27, 0x37 }, { 0x28, 0x04 }, { 0x29, 0x59 },
+ { 0x2a, 0x04 }, { 0x2b, 0x68 }, { 0x30, 0x04 }, { 0x31, 0x85 },
+ { 0x32, 0x04 }, { 0x33, 0xa2 }, { 0x34, 0x04 }, { 0x35, 0xbc },
+ { 0x36, 0x04 }, { 0x37, 0xd8 }, { 0x38, 0x04 }, { 0x39, 0xf4 },
+ { 0x3a, 0x05 }, { 0x3b, 0x0e }, { 0x40, 0x05 }, { 0x41, 0x13 },
+ { 0x42, 0x05 }, { 0x43, 0x1f }, { 0x44, 0x05 }, { 0x45, 0x1f },
+ { 0x46, 0x00 }, { 0x47, 0x00 }, { 0x48, 0x01 }, { 0x49, 0x43 },
+ { 0x4a, 0x01 }, { 0x4b, 0x4c }, { 0x4c, 0x01 }, { 0x4d, 0x6f },
+ { 0x4e, 0x01 }, { 0x4f, 0x92 }, { 0x50, 0x01 }, { 0x51, 0xb5 },
+ { 0x52, 0x01 }, { 0x53, 0xd4 }, { 0x58, 0x02 }, { 0x59, 0x06 },
+ { 0x5a, 0x02 }, { 0x5b, 0x33 }, { 0x5c, 0x02 }, { 0x5d, 0x59 },
+ { 0x5e, 0x02 }, { 0x5f, 0x7d }, { 0x60, 0x02 }, { 0x61, 0xbd },
+ { 0x62, 0x02 }, { 0x63, 0xf7 }, { 0x64, 0x03 }, { 0x65, 0x31 },
+ { 0x66, 0x03 }, { 0x67, 0x63 }, { 0x68, 0x03 }, { 0x69, 0x9d },
+ { 0x6a, 0x03 }, { 0x6b, 0xd2 }, { 0x6c, 0x04 }, { 0x6d, 0x05 },
+ { 0x6e, 0x04 }, { 0x6f, 0x38 }, { 0x70, 0x04 }, { 0x71, 0x51 },
+ { 0x72, 0x04 }, { 0x73, 0x70 }, { 0x74, 0x04 }, { 0x75, 0x85 },
+ { 0x76, 0x04 }, { 0x77, 0xa1 }, { 0x78, 0x04 }, { 0x79, 0xc0 },
+ { 0x7a, 0x04 }, { 0x7b, 0xd8 }, { 0x7c, 0x04 }, { 0x7d, 0xf2 },
+ { 0x7e, 0x05 }, { 0x7f, 0x10 }, { 0x80, 0x05 }, { 0x81, 0x21 },
+ { 0x82, 0x05 }, { 0x83, 0x2e }, { 0x84, 0x05 }, { 0x85, 0x3a },
+ { 0x86, 0x05 }, { 0x87, 0x3e }, { 0x88, 0x00 }, { 0x89, 0x00 },
+ { 0x8a, 0x01 }, { 0x8b, 0x86 }, { 0x8c, 0x01 }, { 0x8d, 0x8f },
+ { 0x8e, 0x01 }, { 0x8f, 0xb3 }, { 0x90, 0x01 }, { 0x91, 0xd7 },
+ { 0x92, 0x01 }, { 0x93, 0xfb }, { 0x94, 0x02 }, { 0x95, 0x18 },
+ { 0x96, 0x02 }, { 0x97, 0x4f }, { 0x98, 0x02 }, { 0x99, 0x7e },
+ { 0x9a, 0x02 }, { 0x9b, 0xa6 }, { 0x9c, 0x02 }, { 0x9d, 0xcf },
+ { 0x9e, 0x03 }, { 0x9f, 0x14 }, { 0xa4, 0x03 }, { 0xa5, 0x52 },
+ { 0xa6, 0x03 }, { 0xa7, 0x93 }, { 0xac, 0x03 }, { 0xad, 0xcf },
+ { 0xae, 0x04 }, { 0xaf, 0x08 }, { 0xb0, 0x04 }, { 0xb1, 0x42 },
+ { 0xb2, 0x04 }, { 0xb3, 0x7f }, { 0xb4, 0x04 }, { 0xb5, 0xb4 },
+ { 0xb6, 0x04 }, { 0xb7, 0xcc }, { 0xb8, 0x04 }, { 0xb9, 0xf2 },
+ { 0xba, 0x05 }, { 0xbb, 0x0c }, { 0xbc, 0x05 }, { 0xbd, 0x26 },
+ { 0xbe, 0x05 }, { 0xbf, 0x4b }, { 0xc0, 0x05 }, { 0xc1, 0x64 },
+ { 0xc2, 0x05 }, { 0xc3, 0x83 }, { 0xc4, 0x05 }, { 0xc5, 0xa1 },
+ { 0xc6, 0x05 }, { 0xc7, 0xba }, { 0xc8, 0x05 }, { 0xc9, 0xc4 },
+ { 0xca, 0x05 }, { 0xcb, 0xd5 }, { 0xcc, 0x05 }, { 0xcd, 0xd5 },
+ { 0xce, 0x00 }, { 0xcf, 0xce }, { 0xd0, 0x00 }, { 0xd1, 0xdb },
+ { 0xd2, 0x01 }, { 0xd3, 0x32 }, { 0xd4, 0x01 }, { 0xd5, 0x3b },
+ { 0xd6, 0x01 }, { 0xd7, 0x74 }, { 0xd8, 0x01 }, { 0xd9, 0x7d },
+ { 0xfe, 0x60 }, { 0x00, 0xcc }, { 0x01, 0x0f }, { 0x02, 0xff },
+ { 0x03, 0x01 }, { 0x04, 0x00 }, { 0x05, 0x02 }, { 0x06, 0x00 },
+ { 0x07, 0x00 }, { 0x09, 0xc4 }, { 0x0a, 0x00 }, { 0x0b, 0x04 },
+ { 0x0c, 0x01 }, { 0x0d, 0x00 }, { 0x0e, 0x04 }, { 0x0f, 0x00 },
+ { 0x10, 0x71 }, { 0x12, 0xc4 }, { 0x13, 0x00 }, { 0x14, 0x04 },
+ { 0x15, 0x01 }, { 0x16, 0x00 }, { 0x17, 0x06 }, { 0x18, 0x00 },
+ { 0x19, 0x71 }, { 0x1b, 0xc4 }, { 0x1c, 0x00 }, { 0x1d, 0x02 },
+ { 0x1e, 0x00 }, { 0x1f, 0x00 }, { 0x20, 0x08 }, { 0x21, 0x66 },
+ { 0x22, 0xb4 }, { 0x24, 0xc4 }, { 0x25, 0x00 }, { 0x26, 0x02 },
+ { 0x27, 0x00 }, { 0x28, 0x00 }, { 0x29, 0x07 }, { 0x2a, 0x66 },
+ { 0x2b, 0xb4 }, { 0x2f, 0xc4 }, { 0x30, 0x00 }, { 0x31, 0x04 },
+ { 0x32, 0x01 }, { 0x33, 0x00 }, { 0x34, 0x03 }, { 0x35, 0x00 },
+ { 0x36, 0x71 }, { 0x38, 0xc4 }, { 0x39, 0x00 }, { 0x3a, 0x04 },
+ { 0x3b, 0x01 }, { 0x3d, 0x00 }, { 0x3f, 0x05 }, { 0x40, 0x00 },
+ { 0x41, 0x71 }, { 0x83, 0xce }, { 0x84, 0x02 }, { 0x85, 0x20 },
+ { 0x86, 0xdc }, { 0x87, 0x00 }, { 0x88, 0x04 }, { 0x89, 0x00 },
+ { 0x8a, 0xbb }, { 0x8b, 0x80 }, { 0xc7, 0x0e }, { 0xc8, 0x05 },
+ { 0xc9, 0x1f }, { 0xca, 0x06 }, { 0xcb, 0x00 }, { 0xcc, 0x03 },
+ { 0xcd, 0x04 }, { 0xce, 0x1f }, { 0xcf, 0x1f }, { 0xd0, 0x1f },
+ { 0xd1, 0x1f }, { 0xd2, 0x1f }, { 0xd3, 0x1f }, { 0xd4, 0x1f },
+ { 0xd5, 0x1f }, { 0xd6, 0x1f }, { 0xd7, 0x17 }, { 0xd8, 0x1f },
+ { 0xd9, 0x16 }, { 0xda, 0x1f }, { 0xdb, 0x0e }, { 0xdc, 0x01 },
+ { 0xdd, 0x1f }, { 0xde, 0x02 }, { 0xdf, 0x00 }, { 0xe0, 0x03 },
+ { 0xe1, 0x04 }, { 0xe2, 0x1f }, { 0xe3, 0x1f }, { 0xe4, 0x1f },
+ { 0xe5, 0x1f }, { 0xe6, 0x1f }, { 0xe7, 0x1f }, { 0xe8, 0x1f },
+ { 0xe9, 0x1f }, { 0xea, 0x1f }, { 0xeb, 0x17 }, { 0xec, 0x1f },
+ { 0xed, 0x16 }, { 0xee, 0x1f }, { 0xef, 0x03 }, { 0xfe, 0x70 },
+ { 0x5a, 0x0b }, { 0x5b, 0x0b }, { 0x5c, 0x55 }, { 0x5d, 0x24 },
+ { 0xfe, 0x90 }, { 0x12, 0x24 }, { 0x13, 0x49 }, { 0x14, 0x92 },
+ { 0x15, 0x86 }, { 0x16, 0x61 }, { 0x17, 0x18 }, { 0x18, 0x24 },
+ { 0x19, 0x49 }, { 0x1a, 0x92 }, { 0x1b, 0x86 }, { 0x1c, 0x61 },
+ { 0x1d, 0x18 }, { 0x1e, 0x24 }, { 0x1f, 0x49 }, { 0x20, 0x92 },
+ { 0x21, 0x86 }, { 0x22, 0x61 }, { 0x23, 0x18 }, { 0xfe, 0x40 },
+ { 0x0e, 0x10 }, { 0xfe, 0xa0 }, { 0x04, 0x80 }, { 0x16, 0x00 },
+ { 0x26, 0x10 }, { 0x2f, 0x37 }, { 0xfe, 0xd0 }, { 0x06, 0x0f },
+ { 0x4b, 0x00 }, { 0x56, 0x4a }, { 0xfe, 0x00 }, { 0xc2, 0x09 },
+ { 0x35, 0x00 }, { 0xfe, 0x70 }, { 0x7d, 0x61 }, { 0x7f, 0x00 },
+ { 0x7e, 0x4e }, { 0x52, 0x2c }, { 0x49, 0x00 }, { 0x4a, 0x00 },
+ { 0x4b, 0x00 }, { 0x4c, 0x00 }, { 0x4d, 0xe8 }, { 0x4e, 0x25 },
+ { 0x4f, 0x6e }, { 0x50, 0xae }, { 0x51, 0x2f }, { 0xad, 0xf4 },
+ { 0xae, 0x8f }, { 0xaf, 0x00 }, { 0xb0, 0x54 }, { 0xb1, 0x3a },
+ { 0xb2, 0x00 }, { 0xb3, 0x00 }, { 0xb4, 0x00 }, { 0xb5, 0x00 },
+ { 0xb6, 0x18 }, { 0xb7, 0x30 }, { 0xb8, 0x4a }, { 0xb9, 0x98 },
+ { 0xba, 0x30 }, { 0xbb, 0x60 }, { 0xbc, 0x50 }, { 0xbd, 0x00 },
+ { 0xbe, 0x00 }, { 0xbf, 0x39 }, { 0xfe, 0x00 }, { 0x51, 0x66 },
};
static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
@@ -31,7 +159,8 @@ static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
{
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
if (ret < 0)
return ret;
@@ -54,37 +183,32 @@ static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
{
gpiod_set_value(ctx->reset_gpio, 0);
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return regulator_bulk_disable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
}
static int visionox_rm69299_unprepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
ctx->dsi->mode_flags = 0;
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
- if (ret < 0)
- dev_err(ctx->panel.dev, "set_display_off cmd failed ret = %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
/* 120ms delay required here as per DCS spec */
- msleep(120);
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "enter_sleep cmd failed ret = %d\n", ret);
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = visionox_rm69299_power_off(ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return ret;
+ return visionox_rm69299_power_off(ctx);
}
static int visionox_rm69299_prepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+ int ret, i;
ret = visionox_rm69299_power_on(ctx);
if (ret < 0)
@@ -92,52 +216,20 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 0 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 1 failed, ret = %d\n", ret);
- goto power_off;
- }
+ for (i = 0; i < ctx->desc->init_seq_len; i++)
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, &ctx->desc->init_seq[i * 2], 2);
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 2 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 3 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending exit sleep DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "set_display_on cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending set_display_on DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
-
-power_off:
- return ret;
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
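
The unprepare()/prepare() rewrites above rely on the mipi_dsi_multi_context error-accumulation idiom. A short sketch of that contract; dsi stands in for the driver's mipi_dsi_device, everything else is the documented drm_mipi_dsi.h behaviour:

	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	/*
	 * Each *_multi() helper records the first failure in
	 * dsi_ctx.accum_err and becomes a no-op once it is non-zero,
	 * so no per-command error check is needed.
	 */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);	/* also skipped on a pending error */
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* 0 on success, first error otherwise */
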
@@ -154,14 +246,26 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
.flags = 0,
};
+static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = {
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 8,
+ .vsync_end = 2160 + 8 + 4,
+ .vtotal = 2160 + 8 + 4 + 4,
+ .flags = 0,
+};
+
static int visionox_rm69299_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev,
- &visionox_rm69299_1080x2248_60hz);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
dev_err(ctx->panel.dev, "failed to create a new display mode\n");
return 0;
@@ -187,20 +291,22 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm69299, panel,
+ &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->desc = device_get_match_data(dev);
+ if (!ctx->desc)
+ return -EINVAL;
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- ctx->supplies[0].supply = "vdda";
- ctx->supplies[0].init_load_uA = 32000;
- ctx->supplies[1].supply = "vdd3p3";
- ctx->supplies[1].init_load_uA = 13200;
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(visionox_rm69299_supplies),
+ visionox_rm69299_supplies, &ctx->supplies);
if (ret < 0)
return ret;
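
The supply handling likewise moves from per-instance arrays to a shared const template. A minimal sketch of that helper's contract, reusing the names from the hunk above:

	/*
	 * devm_regulator_bulk_get_const() takes a const template array
	 * (supply names plus init loads) and hands back a devm-allocated,
	 * mutable copy with the regulator handles filled in, so the
	 * template itself can live in rodata and be shared across all
	 * device instances.
	 */
	ret = devm_regulator_bulk_get_const(dev,
					    ARRAY_SIZE(visionox_rm69299_supplies),
					    visionox_rm69299_supplies,
					    &ctx->supplies);
	if (ret < 0)
		return ret;
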
@@ -210,8 +316,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
@@ -239,8 +343,23 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
}
+static const struct visionox_rm69299_panel_desc visionox_rm69299_1080p_display_desc = {
+ .mode = &visionox_rm69299_1080x2248_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2248_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2248_60hz_init_seq),
+};
+
+static const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = {
+ .mode = &visionox_rm69299_1080x2160_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq),
+};
+
static const struct of_device_id visionox_rm69299_of_match[] = {
- { .compatible = "visionox,rm69299-1080p-display", },
+ { .compatible = "visionox,rm69299-1080p-display",
+ .data = &visionox_rm69299_1080p_display_desc },
+ { .compatible = "visionox,rm69299-shift",
+ .data = &visionox_rm69299_shift_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
index 4db7fa8d74c4..e53645d59413 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
@@ -360,9 +360,11 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
struct visionox_rm692e5 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm692e5, panel,
+ &visionox_rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_rm692e5_supplies),
@@ -383,8 +385,6 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &visionox_rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = visionox_rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index 17b8defe79c1..97a79411e1ec 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -248,9 +248,11 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
struct visionox_vtdr6130 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_vtdr6130, panel,
+ &visionox_vtdr6130_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_vtdr6130_supplies),
@@ -273,9 +275,6 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 2591ff8f0d4e..dd74610bd2eb 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -347,9 +347,11 @@ static int ws2401_probe(struct spi_device *spi)
struct ws2401 *ws;
int ret;
- ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL);
- if (!ws)
- return -ENOMEM;
+ ws = devm_drm_panel_alloc(dev, struct ws2401, panel, &ws2401_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
ws->dev = dev;
/*
@@ -379,9 +381,6 @@ static int ws2401_probe(struct spi_device *spi)
ws2401_read_mtp_id(ws);
ws2401_power_off(ws);
- drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ws->panel);
if (ret)
return dev_err_probe(dev, ret,
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 2b91414c2829..fc6516373b5d 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -241,9 +241,10 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
struct xpp055c272 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct xpp055c272, panel,
+ &xpp055c272_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -269,9 +270,6 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 3385fd3ef41a..5d0dce10336b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,7 +29,7 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *ptdev = dev_get_drvdata(dev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
@@ -40,7 +40,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
- ptdev->pfdevfreq.current_frequency = *freq;
+ pfdev->pfdevfreq.current_frequency = *freq;
return err;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 5d35076b2e6d..04bec27449cb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,6 +209,11 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+#ifdef CONFIG_DEBUG_FS
+ mutex_init(&pfdev->debugfs.gems_lock);
+ INIT_LIST_HEAD(&pfdev->debugfs.gems_list);
+#endif
+
err = panfrost_pm_domain_init(pfdev);
if (err)
return err;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index dcff70f905cd..077525a3ad68 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -111,6 +111,17 @@ struct panfrost_compatible {
u8 gpu_quirks;
};
+/**
+ * struct panfrost_device_debugfs - Device-wide DebugFS tracking structures
+ */
+struct panfrost_device_debugfs {
+ /** @gems_list: Device-wide list of GEM objects owned by at least one file. */
+ struct list_head gems_list;
+
+ /** @gems_lock: Serializes access to the device-wide list of GEM objects. */
+ struct mutex gems_lock;
+};
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -164,6 +175,10 @@ struct panfrost_device {
atomic_t use_count;
spinlock_t lock;
} cycle_counter;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_device_debugfs debugfs;
+#endif
};
struct panfrost_mmu {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f1ec3b02f15a..1ea6c509a5d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
@@ -312,7 +313,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- 1, NULL);
+ 1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -495,6 +496,46 @@ out_put_object:
return ret;
}
+static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panfrost_set_label_bo *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user(u64_to_user_ptr(args->label),
+ PANFROST_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panfrost_gem_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
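
For reference, a hypothetical userspace sketch of the new ioctl; fd and bo_handle are placeholders, and the struct and ioctl names are taken from the panfrost uapi added by this series:

	struct drm_panfrost_set_label_bo args = {
		.handle = bo_handle,
		.label = (__u64)(uintptr_t)"my texture atlas",
	};

	/* Fails with -E2BIG if the string exceeds PANFROST_BO_LABEL_MAXLEN. */
	if (drmIoctl(fd, DRM_IOCTL_PANFROST_SET_LABEL_BO, &args))
		perror("SET_LABEL_BO");

	/* A NULL label pointer clears the label; an empty string does not. */
	args.label = 0;
	drmIoctl(fd, DRM_IOCTL_PANFROST_SET_LABEL_BO, &args);
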
@@ -561,6 +602,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -618,6 +660,37 @@ static const struct file_operations panfrost_drm_driver_fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#ifdef CONFIG_DEBUG_FS
+static int panthor_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ panfrost_gem_debugfs_print_bos(pfdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panthor_debugfs_list[] = {
+ {"gems", panthor_gems_show, 0, NULL},
+};
+
+static int panthor_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_debugfs_list,
+ ARRAY_SIZE(panthor_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
+static void panfrost_debugfs_init(struct drm_minor *minor)
+{
+ panthor_gems_debugfs_init(minor);
+}
+#endif
+
/*
* Panfrost driver version:
* - 1.0 - initial interface
@@ -625,6 +698,7 @@ static const struct file_operations panfrost_drm_driver_fops = {
* - 1.2 - adds AFBC_FEATURES query
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
+ * - 1.4 - adds SET_LABEL_BO
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -637,10 +711,13 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = panfrost_debugfs_init,
+#endif
};
static int panfrost_probe(struct platform_device *pdev)
@@ -789,6 +866,8 @@ static const struct panfrost_compatible amlogic_data = {
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
+static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2",
+ "core3", "core4" };
/*
* The old data with two power supplies for MT8183 is here only to
* keep retro-compatibility with older devicetrees, as DVFS will
@@ -797,51 +876,53 @@ static const struct panfrost_compatible amlogic_data = {
* On new devicetrees please use the _b variant with a single and
* coupled regulators instead.
*/
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
-static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
+static const char * const legacy_supplies[] = { "mali", "sram", NULL };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
- .supply_names = mediatek_mt8183_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(legacy_supplies) - 1,
+ .supply_names = legacy_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
};
-static const char * const mediatek_mt8183_b_supplies[] = { "mali", NULL };
static const struct panfrost_compatible mediatek_mt8183_b_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
-static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
static const struct panfrost_compatible mediatek_mt8186_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
- .pm_domain_names = mediatek_mt8186_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 2,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
-/* MT8188 uses the same power domains and power supplies as MT8183 */
static const struct panfrost_compatible mediatek_mt8188_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
- .supply_names = mediatek_mt8183_b_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
- .pm_domain_names = mediatek_mt8183_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 3,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
-static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
-static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2",
- "core3", "core4" };
static const struct panfrost_compatible mediatek_mt8192_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1,
- .supply_names = mediatek_mt8192_supplies,
- .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
- .pm_domain_names = mediatek_mt8192_pm_domains,
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 5,
+ .pm_domain_names = mediatek_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
+};
+
+static const struct panfrost_compatible mediatek_mt8370_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 2,
+ .pm_domain_names = mediatek_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
@@ -868,6 +949,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+ { .compatible = "mediatek,mt8370-mali", .data = &mediatek_mt8370_data },
{ .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
{}
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 963f04ba2de6..bb73f2a68a12 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
@@ -11,6 +12,36 @@
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
+{
+ struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+#else
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{}
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
+#endif
+
/* Called DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -35,6 +66,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
*/
WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+ kfree_const(bo->label.str);
+ panfrost_gem_debugfs_bo_rm(bo);
+ mutex_destroy(&bo->label.lock);
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -260,6 +295,9 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
obj->base.map_wc = !pfdev->coherent;
+ mutex_init(&obj->label.lock);
+
+ panfrost_gem_debugfs_bo_add(pfdev, obj);
return &obj->base.base;
}
@@ -300,5 +338,153 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
bo = to_panfrost_bo(obj);
bo->noexec = true;
+ /*
+ * We assign this generic label because this function cannot
+ * be reached through any of the Panfrost UM driver-specific
+ * code paths, unless one is given by explicitly calling the
+ * SET_LABEL_BO ioctl. It is therefore preferable to have a
+ * blanket BO tag that tells us the object was imported from
+ * another driver, rather than nothing at all.
+ */
+ panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");
+
return obj;
}
+
+void
+panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
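+ /* Publish the new label under the lock; free the old one outside it. */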
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panfrost_gem_set_label(obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+struct flag_def {
+ u32 flag;
+ const char *name;
+};
+
+static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const struct flag_def gem_state_flags_names[] = {
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
+ gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ if (bo->base.madv < 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
+ else if (bo->base.madv > 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;
+
+ seq_printf(m, "0x%-10x", gem_state_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panfrost_gem_object *bo;
+
+ panfrost_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state label\n");
+ seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
+ list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
+ panfrost_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "===================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
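
As an aside, the internal labelling helper above is meant for BOs that never get a userspace handle. A hypothetical sketch (the allocation call and label text are placeholders for an in-kernel user):

	/*
	 * panfrost_gem_internal_set_label() kstrdup_const()s the string
	 * and must only run while handle_count is still zero, i.e.
	 * before the BO is exposed to userspace.
	 */
	bo = panfrost_gem_create(dev, size, 0);
	if (!IS_ERR(bo))
		panfrost_gem_internal_set_label(&bo->base.base, "Tiler heap chunk");
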
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 7516b7ecf7fe..8de3e76f2717 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -8,6 +8,46 @@
#include <drm/drm_mm.h>
struct panfrost_mmu;
+struct panfrost_device;
+
+#define PANFROST_BO_LABEL_MAXLEN 4096
+
+enum panfrost_debugfs_gem_state_flags {
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(0),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(1),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED: GEM BO was reclaimed by the shrinker. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED = BIT(2),
+
+ /**
+ * @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE: GEM BO pages were marked as no longer
+ * needed by UM and can be reclaimed by the shrinker.
+ */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE = BIT(3),
+};
+
+/**
+ * struct panfrost_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panfrost_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+ /** @creator.process_name: Group leader name in owning thread's process */
+ char process_name[TASK_COMM_LEN];
+
+ /** @creator.tgid: PID of the thread's group leader within its process */
+ pid_t tgid;
+ } creator;
+};
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
@@ -41,8 +81,26 @@ struct panfrost_gem_object {
*/
size_t heap_rss_size;
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+ * @label.str: Pointer to a NULL-terminated label string.
+ */
+ const char *str;
+
+ /** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
bool noexec :1;
bool is_heap :1;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_gem_debugfs debugfs;
+#endif
};
struct panfrost_gem_mapping {
@@ -89,4 +147,12 @@ void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
int panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+void panfrost_gem_set_label(struct drm_gem_object *obj, const char *label);
+void panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label);
+
+#ifdef CONFIG_DEBUG_FS
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 15e2d505550f..82acabb21b27 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -751,11 +751,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
int js = panfrost_job_get_slot(job);
/*
- * If the GPU managed to complete this jobs fence, the timeout is
- * spurious. Bail out.
+ * If the GPU managed to complete this job's fence, the timeout has
+ * fired before the free-job worker ran. The timeout is spurious, so
+ * bail out.
*/
if (dma_fence_is_signaled(job->done_fence))
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/*
* Panfrost IRQ handler may take a long time to process an interrupt
@@ -770,7 +770,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
if (dma_fence_is_signaled(job->done_fence)) {
dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
@@ -786,7 +786,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
atomic_set(&pfdev->reset.pending, 1);
panfrost_reset(pfdev, sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void panfrost_reset_work(struct work_struct *work)
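
For context, a minimal sketch of a timedout_job callback under the new scheduler return codes; to_my_job() and my_gpu_reset() are hypothetical, while the enum values are the ones used in the hunk above:

	static enum drm_gpu_sched_stat
	my_timedout_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = to_my_job(sched_job);

		/*
		 * Fence already signaled: spurious timeout, no reset
		 * needed. The scheduler keeps the job and re-arms its
		 * timeout timer.
		 */
		if (dma_fence_is_signaled(job->done_fence))
			return DRM_GPU_SCHED_STAT_NO_HANG;

		/* Real hang: trigger the driver reset path and say so. */
		my_gpu_reset(job);
		return DRM_GPU_SCHED_STAT_RESET;
	}
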
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 52befead08c6..563f16bae543 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -111,6 +111,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_put_mapping;
perfcnt->buf = map.vaddr;
+ panfrost_gem_internal_set_label(&bo->base, "Perfcnt sample buffer");
+
/*
* Invalidate the cache and clear the counters to start from a fresh
* state.
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 465d3ab1b79e..4fc7cf2aeed5 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -230,6 +230,24 @@ struct panthor_file {
/** @ptdev: Device attached to this file. */
struct panthor_device *ptdev;
+ /** @user_mmio: User MMIO related fields. */
+ struct {
+ /**
+ * @offset: Offset used for user MMIO mappings.
+ *
+ * This offset should not be used to check the type of mapping
+ * except in panthor_mmap(). After that point, MMIO mapping
+ * offsets have been adjusted to match
+ * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used
+ * instead.
+ * Make sure this rule is followed at all times, because
+ * userspace is in control of the offset, and can change the
+ * value behind our back. Otherwise it can lead to erroneous
+ * branching in kernel space.
+ */
+ u64 offset;
+ } user_mmio;
+
/** @vms: VM pool attached to this file. */
struct panthor_vm_pool *vms;
@@ -437,4 +455,75 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
extern struct workqueue_struct *panthor_cleanup_wq;
+static inline void gpu_write(struct panthor_device *ptdev, u32 reg, u32 data)
+{
+ writel(data, ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read(struct panthor_device *ptdev, u32 reg)
+{
+ return readl(ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return readl_relaxed(ptdev->iomem + reg);
+}
+
+static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
+{
+ gpu_write(ptdev, reg, lower_32_bits(data));
+ gpu_write(ptdev, reg + 4, upper_32_bits(data));
+}
+
+static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read(ptdev, reg) | ((u64)gpu_read(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read_relaxed(ptdev, reg) |
+ ((u64)gpu_read_relaxed(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
+{
+ u32 lo, hi1, hi2;
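+
+ /*
+ * Read the high word on both sides of the low word and retry if it
+ * changed in between, so a carry during the two 32-bit reads cannot
+ * produce a torn 64-bit value.
+ */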
+ do {
+ hi1 = gpu_read(ptdev, reg + 4);
+ lo = gpu_read(ptdev, reg);
+ hi2 = gpu_read(ptdev, reg + 4);
+ } while (hi1 != hi2);
+ return lo | ((u64)hi2 << 32);
+}
+
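+/*
+ * Thin wrappers around read_poll_timeout{,_atomic}(): the trailing
+ * "false" is the sleep_before_read argument, and (dev, reg) are
+ * forwarded to the matching gpu_read*() accessor.
+ */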
+#define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read64_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read64, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read64_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read64, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read_relaxed, val, cond, delay_us, \
+ timeout_us, false, dev, reg)
+
+#define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 6200cad22563..1116f2d2826e 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -772,8 +772,8 @@ static int panthor_query_timestamp_info(struct panthor_device *ptdev,
#else
arg->timestamp_frequency = 0;
#endif
- arg->current_timestamp = panthor_gpu_read_timestamp(ptdev);
- arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev);
+ arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
+ arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);
pm_runtime_put(ptdev->base.dev);
return 0;
@@ -996,7 +996,8 @@ static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
struct drm_sched_job *job;
- job = panthor_job_create(pfile, args->group_handle, qsubmit);
+ job = panthor_job_create(pfile, args->group_handle, qsubmit,
+ file->client_id);
if (IS_ERR(job)) {
ret = PTR_ERR(job);
goto out_cleanup_submit_ctx;
@@ -1378,6 +1379,20 @@ err_put_obj:
return ret;
}
+static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
+ void *data, struct drm_file *file)
+{
+ struct drm_panthor_set_user_mmio_offset *args = data;
+ struct panthor_file *pfile = file->driver_priv;
+
+ if (args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_32BIT &&
+ args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
+ return -EINVAL;
+
+ WRITE_ONCE(pfile->user_mmio.offset, args->offset);
+ return 0;
+}
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1395,6 +1410,18 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
}
pfile->ptdev = ptdev;
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
+
+#ifdef CONFIG_ARM64
+ /*
+ * With 32-bit systems being limited by the 32-bit representation of
+ * mmap2's pgoffset field, we need to make the MMIO offset arch
+ * specific.
+ */
+ if (test_tsk_thread_flag(current, TIF_32BIT))
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+#endif
+
ret = panthor_vm_pool_create(pfile);
if (ret)
@@ -1448,6 +1475,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1456,30 +1484,26 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
struct panthor_file *pfile = file->driver_priv;
struct panthor_device *ptdev = pfile->ptdev;
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ u64 user_mmio_offset;
int ret, cookie;
if (!drm_dev_enter(file->minor->dev, &cookie))
return -ENODEV;
-#ifdef CONFIG_ARM64
- /*
- * With 32-bit systems being limited by the 32-bit representation of
- * mmap2's pgoffset field, we need to make the MMIO offset arch
- * specific. This converts a user MMIO offset into something the kernel
- * driver understands.
+ /* Adjust the user MMIO offset to match the offset used on the kernel
+ * side. We use a local variable with a READ_ONCE() here to make sure
+ * the user MMIO offset we check against hasn't changed by the time
+ * we do the offset adjustment.
*/
- if (test_tsk_thread_flag(current, TIF_32BIT) &&
- offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
- offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
- DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+ user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
+ if (offset >= user_mmio_offset) {
+ offset -= user_mmio_offset;
+ offset += DRM_PANTHOR_USER_MMIO_OFFSET;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- }
-#endif
-
- if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
ret = panthor_device_mmap_io(ptdev, vma);
- else
+ } else {
ret = drm_gem_mmap(filp, vma);
+ }
drm_dev_exit(cookie);
return ret;
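
A hypothetical userspace sketch of the new knob (fd is a placeholder; the constants and struct come from the panthor uapi in this series): a 32-bit process, which cannot express the default 64-bit offset in mmap2()'s page-offset field, selects the 32-bit alias before mapping user MMIO:

	struct drm_panthor_set_user_mmio_offset set = {
		.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
	};

	/* Only the two architected offsets are accepted (-EINVAL otherwise). */
	if (drmIoctl(fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &set))
		perror("SET_USER_MMIO_OFFSET");
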
@@ -1583,6 +1607,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
* - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
+ * - 1.5 - adds DRM_PANTHOR_SET_USER_MMIO_OFFSET ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1596,7 +1621,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 4,
+ .minor = 5,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 7bc38e635329..36f1034839c2 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1063,8 +1063,8 @@ static void panthor_fw_stop(struct panthor_device *ptdev)
u32 status;
gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
- if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_DISABLED, 10, 100000))
+ if (gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_DISABLED, 10, 100000))
drm_err(&ptdev->base, "Failed to stop MCU");
}
@@ -1089,8 +1089,9 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
- if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000)) {
+ if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_HALT, 10,
+ 100000)) {
ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
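Both hunks swap open-coded readl_poll_timeout() on ptdev->iomem for driver-scoped helpers whose definitions are not shown in this patch. A minimal sketch of the assumed shape, keeping the old gpu_read() semantics and building on <linux/iopoll.h>:

    /* Sketch only, assuming the helpers simply wrap the iopoll macros. */
    #define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
            readl_poll_timeout((dev)->iomem + (reg), val, cond, \
                               delay_us, timeout_us)

    #define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, \
                                                 delay_us, timeout_us) \
            readl_relaxed_poll_timeout_atomic((dev)->iomem + (reg), val, cond, \
                                              delay_us, timeout_us)

The atomic relaxed variant is the one wait_ready() in panthor_mmu.c switches to further down.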
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 7c00fd77758b..a123bc740ba1 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -16,10 +16,15 @@
#include "panthor_mmu.h"
#ifdef CONFIG_DEBUG_FS
-static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
- struct panthor_gem_object *bo)
+static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo)
{
INIT_LIST_HEAD(&bo->debugfs.node);
+}
+
+static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
bo->debugfs.creator.tgid = current->group_leader->pid;
get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
@@ -44,14 +49,13 @@ static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
{
- bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ bo->debugfs.flags = usage_flags;
+ panthor_gem_debugfs_bo_add(bo);
}
#else
-static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
- struct panthor_gem_object *bo)
-{}
static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {}
#endif
static void panthor_gem_free_object(struct drm_gem_object *obj)
@@ -246,7 +250,7 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
mutex_init(&obj->label.lock);
- panthor_gem_debugfs_bo_add(ptdev, obj);
+ panthor_gem_debugfs_bo_init(obj);
return &obj->base.base;
}
@@ -285,6 +289,8 @@ panthor_gem_create_with_handle(struct drm_file *file,
bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
}
+ panthor_gem_debugfs_set_usage_flags(bo, 0);
+
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
@@ -296,12 +302,6 @@ panthor_gem_create_with_handle(struct drm_file *file,
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
- /*
- * No explicit flags are needed in the call below, since the
- * function internally sets the INITIALIZED bit for us.
- */
- panthor_gem_debugfs_set_usage_flags(bo, 0);
-
return ret;
}
@@ -387,7 +387,7 @@ static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
unsigned int refcount = kref_read(&bo->base.base.refcount);
char creator_info[32] = {};
size_t resident_size;
- u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ u32 gem_usage_flags = bo->debugfs.flags;
u32 gem_state_flags = 0;
/* Skip BOs being destroyed. */
@@ -436,8 +436,7 @@ void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
scoped_guard(mutex, &ptdev->gems.lock) {
list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
- if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED)
- panthor_gem_debugfs_bo_print(bo, m, &totals);
+ panthor_gem_debugfs_bo_print(bo, m, &totals);
}
}
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 4dd732dcd59f..8fc7215e9b90 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -35,9 +35,6 @@ enum panthor_debugfs_gem_usage_flags {
/** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */
PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT),
-
- /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */
- PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31),
};
/**
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 32d678a0114e..cb7a335e07d7 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -108,14 +108,9 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
- ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
- ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
- ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
- ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
@@ -154,8 +149,7 @@ static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
- u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
- gpu_read(ptdev, GPU_FAULT_ADDR_LO);
+ u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);
drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
@@ -246,45 +240,27 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
u32 pwroff_reg, u32 pwrtrans_reg,
u64 mask, u32 timeout_us)
{
- u32 val, i;
+	u64 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwroff_reg, mask);
+ gpu_write64(ptdev, pwroff_reg, mask);
- if (mask >> 32)
- gpu_write(ptdev, pwroff_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
return 0;
@@ -307,45 +283,27 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
u32 pwron_reg, u32 pwrtrans_reg,
u32 rdy_reg, u64 mask, u32 timeout_us)
{
- u32 val, i;
+	u64 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwron_reg, mask);
-
- if (mask >> 32)
- gpu_write(ptdev, pwron_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
+ gpu_write64(ptdev, pwron_reg, mask);
- ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
- val, (mask32 & val) == mask32,
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
+					      (mask & val) == mask,
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
+ blk_name, mask);
+ return ret;
}
return 0;
@@ -494,49 +452,3 @@ void panthor_gpu_resume(struct panthor_device *ptdev)
panthor_gpu_l2_power_on(ptdev);
}
-/**
- * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset.
- * @ptdev: Device.
- * @reg: The offset of the register to read.
- *
- * Return: The counter value.
- */
-static u64
-panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg)
-{
- u32 hi, lo;
-
- do {
- hi = gpu_read(ptdev, reg + 0x4);
- lo = gpu_read(ptdev, reg);
- } while (hi != gpu_read(ptdev, reg + 0x4));
-
- return ((u64)hi << 32) | lo;
-}
-
-/**
- * panthor_gpu_read_timestamp() - Read the timestamp register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp value.
- */
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev)
-{
- return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO);
-}
-
-/**
- * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp offset value.
- */
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev)
-{
- u32 hi, lo;
-
- hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI);
- lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO);
-
- return ((u64)hi << 32) | lo;
-}
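The three blocks above collapse the LO/HI register-pair reads into single gpu_read64() calls, and the hand-rolled timestamp helpers are dropped entirely. A minimal sketch of the accessors this code now assumes, given that the LO word always sits 4 bytes below the HI word; the real definitions are added elsewhere in the series:

    static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
    {
            return (u64)readl(ptdev->iomem + reg) |
                   ((u64)readl(ptdev->iomem + reg + 4) << 32);
    }

    static inline void gpu_write64(struct panthor_device *ptdev, u32 reg,
                                   u64 data)
    {
            writel(lower_32_bits(data), ptdev->iomem + reg);
            writel(upper_32_bits(data), ptdev->iomem + reg + 4);
    }

For free-running counters such as GPU_TIMESTAMP, the deleted hi/lo/hi retry loop is still needed to dodge a carry between the two 32-bit reads, so a gpu_read64_counter()-style variant keeping that loop is presumably added for the remaining callers.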
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
index 7f6133a66127..7c17a8c06858 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -30,9 +30,9 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_on(ptdev, #type, \
- type ## _PWRON_LO, \
- type ## _PWRTRANS_LO, \
- type ## _READY_LO, \
+ type ## _PWRON, \
+ type ## _PWRTRANS, \
+ type ## _READY, \
mask, timeout_us)
/**
@@ -42,15 +42,13 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_off(ptdev, #type, \
- type ## _PWROFF_LO, \
- type ## _PWRTRANS_LO, \
+ type ## _PWROFF, \
+ type ## _PWRTRANS, \
mask, timeout_us)
int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);
int panthor_gpu_soft_reset(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev);
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6ca9a2642a4e..4140f697ba5a 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -510,9 +510,9 @@ static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending.
*/
- ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
- val, !(val & AS_STATUS_AS_ACTIVE),
- 10, 100000);
+ ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
+ !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
if (ret) {
panthor_device_schedule_reset(ptdev);
@@ -564,8 +564,7 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
region = region_width | region_start;
/* Lock the region that needs to be updated */
- gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
- gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ gpu_write64(ptdev, AS_LOCKADDR(as_nr), region);
write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
@@ -615,14 +614,9 @@ static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -635,14 +629,9 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -896,17 +885,6 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
return ret;
}
-/**
- * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
- * @vm: VM whose cache to flush
- *
- * Return: 0 on success, a negative error code if flush failed.
- */
-int panthor_vm_flush_all(struct panthor_vm *vm)
-{
- return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
-}
-
static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
@@ -1681,8 +1659,7 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
u32 source_id;
fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
- addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
- addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+ addr = gpu_read64(ptdev, AS_FAULTADDRESS(as));
/* decode the fault status */
exception_type = fault_status & 0xFF;
@@ -2282,7 +2259,7 @@ static enum drm_gpu_sched_stat
panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
{
WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
@@ -2523,7 +2500,7 @@ panthor_vm_bind_job_create(struct drm_file *file,
kref_init(&job->refcount);
job->vm = panthor_vm_get(vm);
- ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index fc274637114e..0e268fdfdb2f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -33,7 +33,6 @@ int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
-int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index a7a323dc5cf9..48bbfd40138c 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -63,20 +63,16 @@
#define GPU_STATUS_DBG_ENABLED BIT(8)
#define GPU_FAULT_STATUS 0x3C
-#define GPU_FAULT_ADDR_LO 0x40
-#define GPU_FAULT_ADDR_HI 0x44
+#define GPU_FAULT_ADDR 0x40
#define GPU_PWR_KEY 0x50
#define GPU_PWR_KEY_UNLOCK 0x2968A819
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
-#define GPU_TIMESTAMP_OFFSET_LO 0x88
-#define GPU_TIMESTAMP_OFFSET_HI 0x8C
-#define GPU_CYCLE_COUNT_LO 0x90
-#define GPU_CYCLE_COUNT_HI 0x94
-#define GPU_TIMESTAMP_LO 0x98
-#define GPU_TIMESTAMP_HI 0x9C
+#define GPU_TIMESTAMP_OFFSET 0x88
+#define GPU_CYCLE_COUNT 0x90
+#define GPU_TIMESTAMP 0x98
#define GPU_THREAD_MAX_THREADS 0xA0
#define GPU_THREAD_MAX_WORKGROUP_SIZE 0xA4
@@ -85,47 +81,29 @@
#define GPU_TEXTURE_FEATURES(n) (0xB0 + ((n) * 4))
-#define GPU_SHADER_PRESENT_LO 0x100
-#define GPU_SHADER_PRESENT_HI 0x104
-#define GPU_TILER_PRESENT_LO 0x110
-#define GPU_TILER_PRESENT_HI 0x114
-#define GPU_L2_PRESENT_LO 0x120
-#define GPU_L2_PRESENT_HI 0x124
-
-#define SHADER_READY_LO 0x140
-#define SHADER_READY_HI 0x144
-#define TILER_READY_LO 0x150
-#define TILER_READY_HI 0x154
-#define L2_READY_LO 0x160
-#define L2_READY_HI 0x164
-
-#define SHADER_PWRON_LO 0x180
-#define SHADER_PWRON_HI 0x184
-#define TILER_PWRON_LO 0x190
-#define TILER_PWRON_HI 0x194
-#define L2_PWRON_LO 0x1A0
-#define L2_PWRON_HI 0x1A4
-
-#define SHADER_PWROFF_LO 0x1C0
-#define SHADER_PWROFF_HI 0x1C4
-#define TILER_PWROFF_LO 0x1D0
-#define TILER_PWROFF_HI 0x1D4
-#define L2_PWROFF_LO 0x1E0
-#define L2_PWROFF_HI 0x1E4
-
-#define SHADER_PWRTRANS_LO 0x200
-#define SHADER_PWRTRANS_HI 0x204
-#define TILER_PWRTRANS_LO 0x210
-#define TILER_PWRTRANS_HI 0x214
-#define L2_PWRTRANS_LO 0x220
-#define L2_PWRTRANS_HI 0x224
-
-#define SHADER_PWRACTIVE_LO 0x240
-#define SHADER_PWRACTIVE_HI 0x244
-#define TILER_PWRACTIVE_LO 0x250
-#define TILER_PWRACTIVE_HI 0x254
-#define L2_PWRACTIVE_LO 0x260
-#define L2_PWRACTIVE_HI 0x264
+#define GPU_SHADER_PRESENT 0x100
+#define GPU_TILER_PRESENT 0x110
+#define GPU_L2_PRESENT 0x120
+
+#define SHADER_READY 0x140
+#define TILER_READY 0x150
+#define L2_READY 0x160
+
+#define SHADER_PWRON 0x180
+#define TILER_PWRON 0x190
+#define L2_PWRON 0x1A0
+
+#define SHADER_PWROFF 0x1C0
+#define TILER_PWROFF 0x1D0
+#define L2_PWROFF 0x1E0
+
+#define SHADER_PWRTRANS 0x200
+#define TILER_PWRTRANS 0x210
+#define L2_PWRTRANS 0x220
+
+#define SHADER_PWRACTIVE 0x240
+#define TILER_PWRACTIVE 0x250
+#define L2_PWRACTIVE 0x260
#define GPU_REVID 0x280
@@ -168,10 +146,8 @@
#define MMU_AS_SHIFT 6
#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
-#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x0)
-#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x4)
-#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x8)
-#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0xC)
+#define AS_TRANSTAB(as) (MMU_AS(as) + 0x0)
+#define AS_MEMATTR(as) (MMU_AS(as) + 0x8)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
((w) ? BIT(0) : 0) | \
@@ -183,8 +159,7 @@
#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
-#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10)
-#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14)
+#define AS_LOCKADDR(as) (MMU_AS(as) + 0x10)
#define AS_COMMAND(as) (MMU_AS(as) + 0x18)
#define AS_COMMAND_NOP 0
#define AS_COMMAND_UPDATE 1
@@ -199,12 +174,10 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
-#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20)
-#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24)
+#define AS_FAULTADDRESS(as) (MMU_AS(as) + 0x20)
#define AS_STATUS(as) (MMU_AS(as) + 0x28)
#define AS_STATUS_AS_ACTIVE BIT(0)
-#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30)
-#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34)
+#define AS_TRANSCFG(as) (MMU_AS(as) + 0x30)
#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
@@ -222,18 +195,11 @@
#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
#define AS_TRANSCFG_WXN BIT(35)
#define AS_TRANSCFG_XREADABLE BIT(36)
-#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38)
-#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C)
+#define AS_FAULTEXTRA(as) (MMU_AS(as) + 0x38)
#define CSF_GPU_LATEST_FLUSH_ID 0x10000
#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
#define CSF_GLB_DOORBELL_ID 0
-#define gpu_write(dev, reg, data) \
- writel(data, (dev)->iomem + (reg))
-
-#define gpu_read(dev, reg) \
- readl((dev)->iomem + (reg))
-
#endif
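With gpu_read()/gpu_write() removed, panthor_regs.h now only describes the register layout; the accessors (including the 64-bit and polling variants used throughout this series) presumably move to a shared driver header. A sketch of the 64-bit poll helper under that assumption, where gpu_read64_relaxed() is an assumed name:

    #define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, \
                                            delay_us, timeout_us) \
            read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, \
                              timeout_us, false, dev, reg)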
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 43ee57728de5..8f17394cc82a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -3241,7 +3241,7 @@ queue_timedout_job(struct drm_sched_job *sched_job)
queue_start(queue);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void queue_free_job(struct drm_sched_job *sched_job)
@@ -3732,7 +3732,8 @@ struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit)
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id)
{
struct panthor_group_pool *gpool = pfile->groups;
struct panthor_job *job;
@@ -3804,7 +3805,7 @@ panthor_job_create(struct panthor_file *pfile,
ret = drm_sched_job_init(&job->base,
&job->group->queues[job->queue_idx]->entity,
- credits, job->group);
+ credits, job->group, drm_client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index e650a445cf50..742b0b4ff3a3 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -29,7 +29,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit);
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id);
struct drm_sched_job *panthor_job_get(struct drm_sched_job *job);
struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job);
void panthor_job_put(struct drm_sched_job *job);
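Both scheduler-facing changes above track core drm_sched API updates: timedout_job callbacks now report DRM_GPU_SCHED_STAT_RESET instead of the retired NOMINAL value, and drm_sched_job_init() grew a client-id argument so jobs can be attributed in fdinfo and tracing. The prototype the converted call sites imply:

    /* Updated core prototype, as implied by the call sites above: */
    int drm_sched_job_init(struct drm_sched_job *job,
                           struct drm_sched_entity *entity,
                           u32 credits, void *owner,
                           u64 drm_client_id);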
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 70aff64ced87..ae7e572b1b4a 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1176,9 +1176,10 @@ err_drm_connector_cleanup:
static struct drm_framebuffer *
qxl_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
+ return drm_gem_fb_create_with_funcs(dev, file_priv, info, mode_cmd,
&qxl_fb_funcs);
}
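The fb_create conversions in this series (qxl here, then radeon, rcar-du, rz-du and shmobile below) follow a core change where the format info is looked up once by the caller and handed down, instead of each driver re-deriving it from mode_cmd. The callback shape the drivers are converted to, as implied by the call sites:

    /* Implied .fb_create prototype after the conversion: */
    struct drm_framebuffer *
    (*fb_create)(struct drm_device *dev, struct drm_file *file_priv,
                 const struct drm_format_info *info,
                 const struct drm_mode_fb_cmd2 *mode_cmd);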
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a46613283393..266c57733136 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -211,7 +211,7 @@ static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
surf->base_align = track->group_size;
surf->palign = palign;
surf->halign = 1;
- if (surf->nbx & (palign - 1)) {
+ if ((surf->nbx & (palign - 1)) && !(palign == 64 && surf->nbx == 32)) {
if (prefix) {
dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
__func__, __LINE__, prefix, surf->nbx, palign);
@@ -2661,6 +2661,95 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
+ case PACKET3_COND_EXEC:
+ {
+ u64 offset;
+
+ if (pkt->count != 2) {
+ DRM_ERROR("bad COND_EXEC (invalid count)\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_EXEC (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 0);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 0] = offset;
+ ib[idx + 1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
+ case PACKET3_COND_WRITE:
+ if (pkt->count != 7) {
+ DRM_ERROR("bad COND_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ if (idx_value & 0x10) {
+ u64 offset;
+ /* POLL is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_WRITE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 1);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 1] = offset;
+ ib[idx + 2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* POLL is a reg. */
+ reg = radeon_get_ib_value(p, idx + 1) << 2;
+ if (!evergreen_is_safe_reg(p, reg)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
+ return -EINVAL;
+ }
+ }
+ if (idx_value & 0x100) {
+ u64 offset;
+ /* WRITE is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COND_WRITE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 5);
+ offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32;
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->gpu_offset;
+ ib[idx + 5] = offset;
+ ib[idx + 6] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* WRITE is a reg. */
+ reg = radeon_get_ib_value(p, idx + 5) << 2;
+ if (!evergreen_is_safe_reg(p, reg)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 5);
+ return -EINVAL;
+ }
+ }
+ break;
case PACKET3_NOP:
break;
default:
@@ -3406,7 +3495,12 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
case CAYMAN_PACKET3_DEALLOC_STATE:
break;
case PACKET3_COND_WRITE:
- if (idx_value & 0x100) {
+ if (!(idx_value & 0x10)) {
+ reg = ib[idx + 1] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ if (!(idx_value & 0x100)) {
reg = ib[idx + 5] * 4;
if (!evergreen_vm_reg_valid(reg))
return -EINVAL;
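For both new packets the parser patches the relocated address back into the IB (low dword plus the upper 8 bits) and rejects buffers too small for the 8-byte compare value. In the VM variant, where no relocations exist, only register destinations can be validated, so the tests are the logical inverse of the reloc path: bit 4 of the first dword selects a memory poll source, bit 8 a memory write destination.

    /* Names invented for illustration; the parser tests the raw bits: */
    #define COND_WRITE_POLL_IS_MEM   (1 << 4)   /* 0x10: poll addr in memory */
    #define COND_WRITE_WRITE_IS_MEM  (1 << 8)   /* 0x100: write dest in memory */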
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8f5f8abcb1b4..b4bf5dfeea2d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1302,7 +1302,7 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
fb->obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
fb->obj[0] = NULL;
@@ -1314,6 +1314,7 @@ radeon_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 267f082bc430..88e821d67af7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -110,9 +110,10 @@
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
* 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
* 2.50.0 - Allows unaligned shader loads on CIK. (needed by OpenGL)
+ * 2.51.0 - Add evergreen/cayman OpenGL 4.6 compatibility
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 50
+#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_no_wb;
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index d4a58bd679db..e3a481bbee7b 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -67,7 +67,8 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd);
+ info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 4c8fe83dd610..216219accfd9 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -426,6 +426,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
@@ -490,7 +491,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
index f9893d7d6dfc..e9e59c5e70d5 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
@@ -16,7 +16,7 @@ struct rcar_du_format_info;
struct rcar_du_group;
/*
- * The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * The R-Car DU has 8 hardware planes, shared between primary and overlay planes.
* As using overlay planes requires at least one of the CRTCs being enabled, no
* more than 7 overlay planes can be available. We thus create 1 primary plane
* per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index a9145253294f..af58b814e588 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -878,9 +878,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
struct rcar_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct rcar_lvds, bridge,
+ &rcar_lvds_bridge_ops);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
platform_set_drvdata(pdev, lvds);
@@ -895,7 +896,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 7ab8be46c7f6..1af4c73f7a88 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -918,7 +918,6 @@ static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
}
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
drm_bridge_add(&dsi->bridge);
@@ -1004,9 +1003,10 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
struct rcar_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (dsi == NULL)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rcar_mipi_dsi, bridge,
+ &rcar_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index 5e40f0c1e7b0..e1aa6a719529 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -50,9 +50,20 @@ static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
}
};
+static const struct rzg2l_du_device_info rzg2l_du_r9a09g057_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DSI0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ },
+};
+
static const struct of_device_id rzg2l_du_of_table[] = {
{ .compatible = "renesas,r9a07g043u-du", .data = &rzg2l_du_r9a07g043u_info },
{ .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
+ { .compatible = "renesas,r9a09g057-du", .data = &rzg2l_du_r9a09g057_info },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 564ab4cb3d37..5e6dd16705e6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -22,6 +22,26 @@
* Encoder
*/
+static unsigned int rzg2l_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
@@ -50,10 +70,26 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
struct drm_bridge *bridge;
int ret;
- /* Locate the DRM bridge from the DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge)
- return -EPROBE_DEFER;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if (output == RZG2L_DU_OUTPUT_DPAD0 && rzg2l_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+ }
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
enc_node, rzg2l_du_output_name(output));
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 55a97691e9b2..87f171145a23 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -191,6 +191,7 @@ int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
static struct drm_framebuffer *
rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct rzg2l_du_format_info *format;
@@ -214,7 +215,7 @@ rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index dc6ab012cdb6..f87337c3cbb5 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -4,10 +4,14 @@
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -15,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -23,13 +28,37 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
#include "rzg2l_mipi_dsi_regs.h"
+#define RZG2L_DCS_BUF_SIZE 128 /* Maximum DCS buffer size in external memory. */
+
+#define RZ_MIPI_DSI_FEATURE_16BPP BIT(0)
+
+struct rzg2l_mipi_dsi;
+
+struct rzg2l_mipi_dsi_hw_info {
+ int (*dphy_init)(struct rzg2l_mipi_dsi *dsi, u64 hsfreq_millihz);
+ void (*dphy_startup_late_init)(struct rzg2l_mipi_dsi *dsi);
+ void (*dphy_exit)(struct rzg2l_mipi_dsi *dsi);
+ int (*dphy_conf_clks)(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz);
+ unsigned int (*dphy_mode_clk_check)(struct rzg2l_mipi_dsi *dsi,
+ unsigned long mode_freq);
+ u32 phy_reg_offset;
+ u32 link_reg_offset;
+ unsigned long min_dclk;
+ unsigned long max_dclk;
+ u8 features;
+};
+
struct rzg2l_mipi_dsi {
struct device *dev;
void __iomem *mmio;
+ const struct rzg2l_mipi_dsi_hw_info *info;
+
struct reset_control *rstc;
struct reset_control *arstc;
struct reset_control *prstc;
@@ -44,6 +73,10 @@ struct rzg2l_mipi_dsi {
unsigned int num_data_lanes;
unsigned int lanes;
unsigned long mode_flags;
+
+ /* DCS buffer pointers when using external memory. */
+ dma_addr_t dcs_buf_phys;
+ u8 *dcs_buf_virt;
};
static inline struct rzg2l_mipi_dsi *
@@ -75,7 +108,7 @@ struct rzg2l_mipi_dsi_timings {
static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
{
- .hsfreq_max = 80000,
+ .hsfreq_max = 80000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 13,
@@ -89,7 +122,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 125000,
+ .hsfreq_max = 125000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -103,7 +136,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 250000,
+ .hsfreq_max = 250000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -117,7 +150,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 360000,
+ .hsfreq_max = 360000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 10,
@@ -131,7 +164,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 720000,
+ .hsfreq_max = 720000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -145,7 +178,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 1500000,
+ .hsfreq_max = 1500000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -162,22 +195,22 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
static void rzg2l_mipi_dsi_phy_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + reg);
+ iowrite32(data, dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static void rzg2l_mipi_dsi_link_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + LINK_REG_OFFSET + reg);
+ iowrite32(data, dsi->mmio + dsi->info->link_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_phy_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + reg);
+ return ioread32(dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + LINK_REG_OFFSET + reg);
+ return ioread32(dsi->mmio + dsi->info->link_reg_offset + reg);
}
/* -----------------------------------------------------------------------------
@@ -185,8 +218,9 @@ static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
*/
static int rzg2l_mipi_dsi_dphy_init(struct rzg2l_mipi_dsi *dsi,
- unsigned long hsfreq)
+ u64 hsfreq_millihz)
{
+ unsigned long hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
const struct rzg2l_mipi_dsi_timings *dphy_timings;
unsigned int i;
u32 dphyctrl0;
@@ -255,20 +289,17 @@ static void rzg2l_mipi_dsi_dphy_exit(struct rzg2l_mipi_dsi *dsi)
reset_control_assert(dsi->rstc);
}
-static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
- const struct drm_display_mode *mode)
+static int rzg2l_dphy_conf_clks(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz)
{
- unsigned long hsfreq;
+ unsigned long vclk_rate;
unsigned int bpp;
- u32 txsetr;
- u32 clstptsetr;
- u32 lptrnstsetr;
- u32 clkkpt;
- u32 clkbfht;
- u32 clkstpt;
- u32 golpbkt;
- int ret;
+ clk_set_rate(dsi->vclk, mode_freq * KILO);
+ vclk_rate = clk_get_rate(dsi->vclk);
+ if (vclk_rate != mode_freq * KILO)
+ dev_dbg(dsi->dev, "Requested vclk rate %lu, actual %lu mismatch\n",
+ mode_freq * KILO, vclk_rate);
/*
* Relationship between hsclk and vclk must follow
* vclk * bpp = hsclk * 8 * lanes
@@ -277,18 +308,39 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* hsclk: DSI HS Byte clock frequency (Hz)
* lanes: number of data lanes
*
- * hsclk(bit) = hsclk(byte) * 8
+ * hsclk(bit) = hsclk(byte) * 8 = hsfreq
*/
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- hsfreq = (mode->clock * bpp * 8) / (8 * dsi->lanes);
+ *hsfreq_millihz = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(vclk_rate, bpp * MILLI),
+ dsi->lanes);
+
+ return 0;
+}
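With the relation spelled out in the comment above, the new millihertz plumbing is easy to sanity-check against the FHD@60 case this driver targets (values assumed for illustration): vclk = 148,500,000 Hz, RGB888 so bpp = 24, and 4 data lanes.

    /* hsfreq(bit)    = vclk * bpp / lanes
     *                = 148500000 * 24 / 4 = 891000000 Hz (891 Mbps)
     * hsfreq_millihz = hsfreq * MILLI     = 891000000000
     */

891 MHz selects the 1.5 GHz row of rzg2l_mipi_dsi_global_timings[] now that hsfreq_max is expressed in Hz, and matches the 891 Mbps ceiling quoted further down in this file.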
+
+static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ unsigned long hsfreq;
+ u64 hsfreq_millihz;
+ u32 txsetr;
+ u32 clstptsetr;
+ u32 lptrnstsetr;
+ u32 clkkpt;
+ u32 clkbfht;
+ u32 clkstpt;
+ u32 golpbkt;
+ u32 dsisetr;
+ int ret;
ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0)
return ret;
- clk_set_rate(dsi->vclk, mode->clock * 1000);
+ ret = dsi->info->dphy_conf_clks(dsi, mode->clock, &hsfreq_millihz);
+ if (ret < 0)
+ goto err_phy;
- ret = rzg2l_mipi_dsi_dphy_init(dsi, hsfreq);
+ ret = dsi->info->dphy_init(dsi, hsfreq_millihz);
if (ret < 0)
goto err_phy;
@@ -296,6 +348,10 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
txsetr = TXSETR_DLEN | TXSETR_NUMLANEUSE(dsi->lanes - 1) | TXSETR_CLEN;
rzg2l_mipi_dsi_link_write(dsi, TXSETR, txsetr);
+ if (dsi->info->dphy_startup_late_init)
+ dsi->info->dphy_startup_late_init(dsi);
+
+ hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
/*
* Global timings characteristic depends on high speed Clock Frequency
* Currently MIPI DSI-IF just supports maximum FHD@60 with:
@@ -304,12 +360,12 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* - data lanes: maximum 4 lanes
* Therefore maximum hsclk will be 891 Mbps.
*/
- if (hsfreq > 445500) {
+ if (hsfreq > 445500000) {
clkkpt = 12;
clkbfht = 15;
clkstpt = 48;
golpbkt = 75;
- } else if (hsfreq > 250000) {
+ } else if (hsfreq > 250000000) {
clkkpt = 7;
clkbfht = 8;
clkstpt = 27;
@@ -328,10 +384,19 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
lptrnstsetr = LPTRNSTSETR_GOLPBKT(golpbkt);
rzg2l_mipi_dsi_link_write(dsi, LPTRNSTSETR, lptrnstsetr);
+ /*
+ * Increase MRPSZ as the default value of 1 will result in long read
+ * commands payload not being saved to memory.
+ */
+ dsisetr = rzg2l_mipi_dsi_link_read(dsi, DSISETR);
+ dsisetr &= ~DSISETR_MRPSZ;
+ dsisetr |= FIELD_PREP(DSISETR_MRPSZ, RZG2L_DCS_BUF_SIZE);
+ rzg2l_mipi_dsi_link_write(dsi, DSISETR, dsisetr);
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
return ret;
@@ -339,7 +404,7 @@ err_phy:
static void rzg2l_mipi_dsi_stop(struct rzg2l_mipi_dsi *dsi)
{
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
}
@@ -532,8 +597,8 @@ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
flags);
}
-static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void rzg2l_mipi_dsi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
@@ -550,6 +615,13 @@ static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
return;
rzg2l_mipi_dsi_set_display_timing(dsi, mode);
+}
+
+static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+ int ret;
ret = rzg2l_mipi_dsi_start_hs_clock(dsi);
if (ret < 0)
@@ -582,9 +654,22 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
- if (mode->clock > 148500)
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+
+ if (mode->clock > dsi->info->max_dclk)
return MODE_CLOCK_HIGH;
+ if (mode->clock < dsi->info->min_dclk)
+ return MODE_CLOCK_LOW;
+
+ if (dsi->info->dphy_mode_clk_check) {
+ enum drm_mode_status status;
+
+ status = dsi->info->dphy_mode_clk_check(dsi, mode->clock);
+ if (status != MODE_OK)
+ return status;
+ }
+
return MODE_OK;
}
@@ -593,6 +678,7 @@ static const struct drm_bridge_funcs rzg2l_mipi_dsi_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = rzg2l_mipi_dsi_atomic_pre_enable,
.atomic_enable = rzg2l_mipi_dsi_atomic_enable,
.atomic_disable = rzg2l_mipi_dsi_atomic_disable,
.mode_valid = rzg2l_mipi_dsi_bridge_mode_valid,
@@ -617,8 +703,16 @@ static int rzg2l_mipi_dsi_host_attach(struct mipi_dsi_host *host,
switch (mipi_dsi_pixel_format_to_bpp(device->format)) {
case 24:
+ break;
case 18:
break;
+ case 16:
+ if (!(dsi->info->features & RZ_MIPI_DSI_FEATURE_16BPP)) {
+ dev_err(dsi->dev, "Unsupported format 0x%04x\n",
+ device->format);
+ return -EINVAL;
+ }
+ break;
default:
dev_err(dsi->dev, "Unsupported format 0x%04x\n", device->format);
return -EINVAL;
@@ -651,9 +745,168 @@ static int rzg2l_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rzg2l_mipi_dsi_read_response(struct rzg2l_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *msg_rx = msg->rx_buf;
+ u8 datatype;
+ u32 result;
+ u16 size;
+
+ result = rzg2l_mipi_dsi_link_read(dsi, RXRSS0R);
+ if (result & RXRSS0R_RXPKTDFAIL) {
+ dev_err(dsi->dev, "packet rx data did not save correctly\n");
+ return -EPROTO;
+ }
+
+ if (result & RXRSS0R_RXFAIL) {
+ dev_err(dsi->dev, "packet rx failure\n");
+ return -EPROTO;
+ }
+
+ if (!(result & RXRSS0R_RXSUC))
+ return -EPROTO;
+
+ datatype = FIELD_GET(RXRSS0R_DT, result);
+
+ switch (datatype) {
+ case 0:
+ dev_dbg(dsi->dev, "ACK\n");
+ return 0;
+ case MIPI_DSI_RX_END_OF_TRANSMISSION:
+ dev_dbg(dsi->dev, "EoTp\n");
+ return 0;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_dbg(dsi->dev, "Acknowledge and error report: $%02x%02x\n",
+ (u8)FIELD_GET(RXRSS0R_DATA1, result),
+ (u8)FIELD_GET(RXRSS0R_DATA0, result));
+ return 0;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ return 1;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ msg_rx[1] = FIELD_GET(RXRSS0R_DATA1, result);
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = FIELD_GET(RXRSS0R_WC, result);
+
+ if (size > msg->rx_len) {
+			dev_err(dsi->dev, "rx buffer too small\n");
+ return -ENOSPC;
+ }
+
+ memcpy(msg_rx, dsi->dcs_buf_virt, size);
+ return size;
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n", datatype);
+ return -EPROTO;
+ }
+}
+
+static ssize_t rzg2l_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ bool need_bta;
+ u32 value;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ /* Terminate operation after this descriptor is finished */
+ value = SQCH0DSC0AR_NXACT_TERM;
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ need_bta = true; /* Message with explicitly requested ACK */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NON_READ);
+ } else if (msg->rx_buf && msg->rx_len > 0) {
+ need_bta = true; /* Read request */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_READ);
+ } else {
+ need_bta = false;
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NONE);
+ }
+
+ /* Set transmission speed */
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ value |= SQCH0DSC0AR_SPD_LOW;
+ else
+ value |= SQCH0DSC0AR_SPD_HIGH;
+
+ /* Write TX packet header */
+ value |= FIELD_PREP(SQCH0DSC0AR_DT, packet.header[0]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA0, packet.header[1]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA1, packet.header[2]);
+
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ value |= SQCH0DSC0AR_FMT_LONG;
+
+ if (packet.payload_length > RZG2L_DCS_BUF_SIZE) {
+			dev_err(dsi->dev, "Packet Tx payload size (%zu) too large\n",
+				packet.payload_length);
+ return -ENOSPC;
+ }
+
+ /* Copy TX packet payload data to memory space */
+ memcpy(dsi->dcs_buf_virt, packet.payload, packet.payload_length);
+ } else {
+ value |= SQCH0DSC0AR_FMT_SHORT;
+ }
+
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0AR, value);
+
+ /*
+ * Write: specify payload data source location, only used for
+ * long packet.
+ * Read: specify payload data storage location of response
+ * packet. Note: a read packet is always a short packet.
+ * If the response packet is a short packet or a long packet
+ * with WC = 0 (no payload), DTSEL is meaningless.
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0BR, SQCH0DSC0BR_DTSEL_MEM_SPACE);
+
+ /*
+ * Set SQCHxSR.AACTFIN bit when descriptor actions are finished.
+ * Read: set Rx result save slot number to 0 (ACTCODE).
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0CR, SQCH0DSC0CR_FINACT);
+
+ /* Set rx/tx payload data address, only relevant for long packet. */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0DR, (u32)dsi->dcs_buf_phys);
+
+ /* Start sequence 0 operation */
+ value = rzg2l_mipi_dsi_link_read(dsi, SQCH0SET0R);
+ value |= SQCH0SET0R_START;
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SET0R, value);
+
+ /* Wait for operation to finish */
+ ret = read_poll_timeout(rzg2l_mipi_dsi_link_read,
+ value, value & SQCH0SR_ADESFIN,
+ 2000, 20000, false, dsi, SQCH0SR);
+ if (ret == 0) {
+ /* Success: clear status bit */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SCR, SQCH0SCR_ADESFIN);
+
+ if (need_bta)
+ ret = rzg2l_mipi_dsi_read_response(dsi, msg);
+ else
+ ret = packet.payload_length;
+ }
+
+ return ret;
+}
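Once .transfer is wired into rzg2l_mipi_dsi_host_ops below, panel drivers attached to this host can use the standard DCS helpers; long read responses land in the 128-byte DMA buffer programmed into SQCH0DSC0DR. An illustrative consumer, where "panel->dsi" is a hypothetical mipi_dsi_device handle:

    /* Hedged example: read the 3-byte display ID through the core helper. */
    u8 id[3];
    ssize_t ret = mipi_dsi_dcs_read(panel->dsi, MIPI_DCS_GET_DISPLAY_ID,
                                    id, sizeof(id));
    if (ret < 0)
            dev_err(dev, "DCS read failed: %zd\n", ret);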
+
static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
.attach = rzg2l_mipi_dsi_host_attach,
.detach = rzg2l_mipi_dsi_host_detach,
+ .transfer = rzg2l_mipi_dsi_host_transfer,
};
/* -----------------------------------------------------------------------------
@@ -701,13 +954,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
u32 txsetr;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rzg2l_mipi_dsi, bridge,
+ &rzg2l_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
dsi->dev = &pdev->dev;
+ dsi->info = of_device_get_match_data(&pdev->dev);
+
ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0)
return dev_err_probe(dsi->dev, ret,
@@ -723,7 +979,7 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->vclk))
return PTR_ERR(dsi->vclk);
- dsi->rstc = devm_reset_control_get_exclusive(dsi->dev, "rst");
+ dsi->rstc = devm_reset_control_get_optional_exclusive(dsi->dev, "rst");
if (IS_ERR(dsi->rstc))
return dev_err_probe(dsi->dev, PTR_ERR(dsi->rstc),
"failed to get rst\n");
@@ -751,17 +1007,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
* mode->clock and format are not available. So initialize DPHY with
* timing parameters for 80Mbps.
*/
- ret = rzg2l_mipi_dsi_dphy_init(dsi, 80000);
+ ret = dsi->info->dphy_init(dsi, 80000000ULL * MILLI);
if (ret < 0)
goto err_phy;
txsetr = rzg2l_mipi_dsi_link_read(dsi, TXSETR);
dsi->num_data_lanes = min(((txsetr >> 16) & 3) + 1, num_data_lanes);
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rzg2l_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
/* Init host device */
@@ -771,10 +1026,15 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_disable;
+ dsi->dcs_buf_virt = dma_alloc_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE,
+ &dsi->dcs_buf_phys, GFP_KERNEL);
+ if (!dsi->dcs_buf_virt)
+ return -ENOMEM;
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
err_pm_disable:
pm_runtime_disable(dsi->dev);
@@ -785,12 +1045,23 @@ static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
{
struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
+ dma_free_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE, dsi->dcs_buf_virt,
+ dsi->dcs_buf_phys);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
}
+static const struct rzg2l_mipi_dsi_hw_info rzg2l_mipi_dsi_info = {
+ .dphy_init = rzg2l_mipi_dsi_dphy_init,
+ .dphy_exit = rzg2l_mipi_dsi_dphy_exit,
+ .dphy_conf_clks = rzg2l_dphy_conf_clks,
+ .link_reg_offset = 0x10000,
+ .min_dclk = 5803,
+ .max_dclk = 148500,
+};
+
static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
- { .compatible = "renesas,rzg2l-mipi-dsi" },
+ { .compatible = "renesas,rzg2l-mipi-dsi", .data = &rzg2l_mipi_dsi_info, },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
index 1dbc16ec64a4..d8082a87d874 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
@@ -41,8 +41,6 @@
#define DSIDPHYTIM3_THS_ZERO(x) ((x) << 0)
/* --------------------------------------------------------*/
-/* Link Registers */
-#define LINK_REG_OFFSET 0x10000
/* Link Status Register */
#define LINKSR 0x10
@@ -81,6 +79,20 @@
#define RSTSR_SWRSTLP (1 << 1)
#define RSTSR_SWRSTHS (1 << 0)
+/* DSI Set Register */
+#define DSISETR 0x120
+#define DSISETR_MRPSZ GENMASK(15, 0)
+
+/* Rx Result Save Slot 0 Register */
+#define RXRSS0R 0x240
+#define RXRSS0R_RXPKTDFAIL BIT(28)
+#define RXRSS0R_RXFAIL BIT(27)
+#define RXRSS0R_RXSUC BIT(25)
+#define RXRSS0R_DT GENMASK(21, 16)
+#define RXRSS0R_DATA1 GENMASK(15, 8)
+#define RXRSS0R_DATA0 GENMASK(7, 0)
+#define RXRSS0R_WC GENMASK(15, 0) /* Word count for long packet. */
+
/* Clock Lane Stop Time Set Register */
#define CLSTPTSETR 0x314
#define CLSTPTSETR_CLKKPT(x) ((x) << 24)
@@ -148,4 +160,44 @@
#define VICH1HPSETR_HFP(x) (((x) & 0x1fff) << 16)
#define VICH1HPSETR_HBP(x) (((x) & 0x1fff) << 0)
+/* Sequence Channel 0 Set 0 Register */
+#define SQCH0SET0R 0x5c0
+#define SQCH0SET0R_START BIT(0)
+
+/* Sequence Channel 0 Status Register */
+#define SQCH0SR 0x5d0
+#define SQCH0SR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Status Clear Register */
+#define SQCH0SCR 0x5d4
+#define SQCH0SCR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Descriptor 0-A Register */
+#define SQCH0DSC0AR 0x780
+#define SQCH0DSC0AR_NXACT_TERM 0 /* Bit 28 */
+#define SQCH0DSC0AR_BTA GENMASK(27, 26)
+#define SQCH0DSC0AR_BTA_NONE 0
+#define SQCH0DSC0AR_BTA_NON_READ 1
+#define SQCH0DSC0AR_BTA_READ 2
+#define SQCH0DSC0AR_BTA_ONLY 3
+#define SQCH0DSC0AR_SPD_HIGH 0
+#define SQCH0DSC0AR_SPD_LOW BIT(25)
+#define SQCH0DSC0AR_FMT_SHORT 0
+#define SQCH0DSC0AR_FMT_LONG BIT(24)
+#define SQCH0DSC0AR_DT GENMASK(21, 16)
+#define SQCH0DSC0AR_DATA1 GENMASK(15, 8)
+#define SQCH0DSC0AR_DATA0 GENMASK(7, 0)
+
+/* Sequence Channel 0 Descriptor 0-B Register */
+#define SQCH0DSC0BR 0x784
+#define SQCH0DSC0BR_DTSEL_MEM_SPACE BIT(24) /* Use external memory */
+
+/* Sequence Channel 0 Descriptor 0-C Register */
+#define SQCH0DSC0CR 0x788
+#define SQCH0DSC0CR_FINACT BIT(0)
+#define SQCH0DSC0CR_AUXOP BIT(22)
+
+/* Sequence Channel 0 Descriptor 0-D Register */
+#define SQCH0DSC0DR 0x78c
+
#endif /* __RZG2L_MIPI_DSI_REGS_H__ */
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
index 4202ab00fb0c..fd9460da1789 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
@@ -117,6 +117,7 @@ const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
static struct drm_framebuffer *
shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct shmob_drm_format_info *format;
@@ -144,7 +145,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
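The signature change here follows a tree-wide update: .fb_create now receives the drm_format_info already looked up and validated by the core, instead of each driver calling drm_get_format_info() itself (see also the rockchip_fb_create() hunk below). A minimal sketch of the updated callback shape, with a hypothetical driver prefix:

    #include <drm/drm_fourcc.h>
    #include <drm/drm_gem_framebuffer_helper.h>

    static struct drm_framebuffer *
    foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                  const struct drm_format_info *info,
                  const struct drm_mode_fb_cmd2 *mode_cmd)
    {
            /* info was validated by the core; no per-driver lookup needed. */
            return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
    }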
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 292c31de18f1..b7e3f5dcf8d5 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -16,7 +16,9 @@
#include <sound/hdmi-codec.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -25,9 +27,9 @@
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
-static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector)
+static inline struct cdn_dp_device *bridge_to_dp(struct drm_bridge *bridge)
{
- return container_of(connector, struct cdn_dp_device, connector);
+ return container_of(bridge, struct cdn_dp_device, bridge);
}
static inline struct cdn_dp_device *encoder_to_dp(struct drm_encoder *encoder)
@@ -231,9 +233,9 @@ static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
}
static enum drm_connector_status
-cdn_dp_connector_detect(struct drm_connector *connector, bool force)
+cdn_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct cdn_dp_device *dp = connector_to_dp(connector);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
enum drm_connector_status status = connector_status_disconnected;
mutex_lock(&dp->lock);
@@ -244,41 +246,25 @@ cdn_dp_connector_detect(struct drm_connector *connector, bool force)
return status;
}
-static void cdn_dp_connector_destroy(struct drm_connector *connector)
+static const struct drm_edid *
+cdn_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
- .detect = cdn_dp_connector_detect,
- .destroy = cdn_dp_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int cdn_dp_connector_get_modes(struct drm_connector *connector)
-{
- struct cdn_dp_device *dp = connector_to_dp(connector);
- int ret = 0;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
+ const struct drm_edid *drm_edid;
mutex_lock(&dp->lock);
-
- ret = drm_edid_connector_add_modes(connector);
-
+ drm_edid = drm_edid_read_custom(connector, cdn_dp_get_edid_block, dp);
mutex_unlock(&dp->lock);
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-cdn_dp_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+cdn_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *display_info,
+ const struct drm_display_mode *mode)
{
- struct cdn_dp_device *dp = connector_to_dp(connector);
- struct drm_display_info *display_info = &dp->connector.display_info;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
u32 requested, actual, rate, sink_max, source_max = 0;
u8 lanes, bpc;
@@ -323,11 +309,6 @@ cdn_dp_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
- .get_modes = cdn_dp_connector_get_modes,
- .mode_valid = cdn_dp_connector_mode_valid,
-};
-
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
int ret;
@@ -360,7 +341,6 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
- const struct drm_display_info *info = &dp->connector.display_info;
int ret;
if (!cdn_dp_check_sink_connection(dp))
@@ -373,17 +353,6 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
return ret;
}
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = drm_edid_read_custom(&dp->connector,
- cdn_dp_get_edid_block, dp);
- drm_edid_connector_update(&dp->connector, dp->drm_edid);
-
- dp->sink_has_audio = info->has_audio;
-
- if (dp->drm_edid)
- DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
- info->width_mm / 10, info->height_mm / 10);
-
return 0;
}
@@ -488,10 +457,6 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
dp->active = false;
dp->max_lanes = 0;
dp->max_rate = 0;
- if (!dp->connected) {
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = NULL;
- }
return 0;
}
@@ -546,26 +511,13 @@ err_clk_disable:
return ret;
}
-static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void cdn_dp_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
- struct drm_display_info *display_info = &dp->connector.display_info;
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
struct video_info *video = &dp->video_info;
- switch (display_info->bpc) {
- case 10:
- video->color_depth = 10;
- break;
- case 6:
- video->color_depth = 6;
- break;
- default:
- video->color_depth = 8;
- break;
- }
-
video->color_fmt = PXL_RGB;
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
@@ -592,19 +544,37 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}
-static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp,
- bool plugged)
+static void cdn_dp_display_info_update(struct cdn_dp_device *dp,
+ struct drm_display_info *display_info)
{
- if (dp->codec_dev)
- dp->plugged_cb(dp->codec_dev, plugged);
+ struct video_info *video = &dp->video_info;
+
+ switch (display_info->bpc) {
+ case 10:
+ video->color_depth = 10;
+ break;
+ case 6:
+ video->color_depth = 6;
+ break;
+ default:
+ video->color_depth = 8;
+ break;
+ }
}
-static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
+static void cdn_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
int ret, val;
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector)
+ return;
+
+ cdn_dp_display_info_update(dp, &connector->display_info);
+
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, &dp->encoder.encoder);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
return;
@@ -625,7 +595,7 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
ret = cdn_dp_enable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
+ DRM_DEV_ERROR(dp->dev, "Failed to enable bridge %d\n",
ret);
goto out;
}
@@ -655,24 +625,21 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
goto out;
}
- cdn_dp_audio_handle_plugged_change(dp, true);
-
out:
mutex_unlock(&dp->lock);
}
-static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
+static void cdn_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *state)
{
- struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
- cdn_dp_audio_handle_plugged_change(dp, false);
if (dp->active) {
ret = cdn_dp_disable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
+ DRM_DEV_ERROR(dp->dev, "Failed to disable bridge %d\n",
ret);
}
}
@@ -704,9 +671,6 @@ static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
- .mode_set = cdn_dp_encoder_mode_set,
- .enable = cdn_dp_encoder_enable,
- .disable = cdn_dp_encoder_disable,
.atomic_check = cdn_dp_encoder_atomic_check,
};
@@ -779,11 +743,12 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
return 0;
}
-static int cdn_dp_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int cdn_dp_audio_prepare(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
struct audio_info audio = {
.sample_width = params->sample_width,
.sample_rate = params->sample_rate,
@@ -805,7 +770,7 @@ static int cdn_dp_audio_hw_params(struct device *dev, void *data,
audio.format = AFMT_SPDIF;
break;
default:
- DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
+ drm_err(bridge->dev, "Invalid format %d\n", daifmt->fmt);
ret = -EINVAL;
goto out;
}
@@ -819,9 +784,10 @@ out:
return ret;
}
-static void cdn_dp_audio_shutdown(struct device *dev, void *data)
+static void cdn_dp_audio_shutdown(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
@@ -835,10 +801,11 @@ out:
mutex_unlock(&dp->lock);
}
-static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
+static int cdn_dp_audio_mute_stream(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction)
{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct cdn_dp_device *dp = bridge_to_dp(bridge);
int ret;
mutex_lock(&dp->lock);
@@ -854,57 +821,22 @@ out:
return ret;
}
-static int cdn_dp_audio_get_eld(struct device *dev, void *data,
- u8 *buf, size_t len)
-{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
-
- memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
-
- return 0;
-}
-
-static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
- struct cdn_dp_device *dp = dev_get_drvdata(dev);
-
- mutex_lock(&dp->lock);
- dp->plugged_cb = fn;
- dp->codec_dev = codec_dev;
- cdn_dp_audio_handle_plugged_change(dp, dp->connected);
- mutex_unlock(&dp->lock);
-
- return 0;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = cdn_dp_audio_hw_params,
- .audio_shutdown = cdn_dp_audio_shutdown,
- .mute_stream = cdn_dp_audio_mute_stream,
- .get_eld = cdn_dp_audio_get_eld,
- .hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
+static const struct drm_bridge_funcs cdn_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .detect = cdn_dp_bridge_detect,
+ .edid_read = cdn_dp_bridge_edid_read,
+ .atomic_enable = cdn_dp_bridge_atomic_enable,
+ .atomic_disable = cdn_dp_bridge_atomic_disable,
+ .mode_valid = cdn_dp_bridge_mode_valid,
+ .mode_set = cdn_dp_bridge_mode_set,
+
+ .dp_audio_prepare = cdn_dp_audio_prepare,
+ .dp_audio_mute_stream = cdn_dp_audio_mute_stream,
+ .dp_audio_shutdown = cdn_dp_audio_shutdown,
};
-static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .i2s = 1,
- .spdif = 1,
- .ops = &audio_codec_ops,
- .max_i2s_channels = 8,
- .no_capture_mute = 1,
- };
-
- dp->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(dp->audio_pdev);
-}
-
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
int ret;
@@ -1006,7 +938,9 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
out:
mutex_unlock(&dp->lock);
- drm_connector_helper_hpd_irq_event(&dp->connector);
+ drm_bridge_hpd_notify(&dp->bridge,
+ dp->connected ? connector_status_connected
+ : connector_status_disconnected);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
@@ -1062,26 +996,35 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
- connector = &dp->connector;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->dpms = DRM_MODE_DPMS_OFF;
-
- ret = drm_connector_init(drm_dev, connector,
- &cdn_dp_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret) {
- DRM_ERROR("failed to initialize connector with drm\n");
- goto err_free_encoder;
- }
+ dp->bridge.ops =
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DP_AUDIO;
+ dp->bridge.of_node = dp->dev->of_node;
+ dp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ dp->bridge.hdmi_audio_dev = dp->dev;
+ dp->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ dp->bridge.hdmi_audio_spdif_playback = 1;
+ dp->bridge.hdmi_audio_dai_port = -1;
+
+ ret = devm_drm_bridge_add(dev, &dp->bridge);
+ if (ret)
+ return ret;
- drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
+ ret = drm_bridge_attach(encoder, &dp->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("failed to attach connector and encoder\n");
- goto err_free_connector;
+ connector = drm_bridge_connector_init(drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dp->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
}
+ drm_connector_attach_encoder(connector, encoder);
+
for (i = 0; i < dp->ports; i++) {
port = dp->port[i];
@@ -1092,7 +1035,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
if (ret) {
DRM_DEV_ERROR(dev,
"register EXTCON_DISP_DP notifier err\n");
- goto err_free_connector;
+ return ret;
}
}
@@ -1101,30 +1044,19 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
schedule_work(&dp->event_work);
return 0;
-
-err_free_connector:
- drm_connector_cleanup(connector);
-err_free_encoder:
- drm_encoder_cleanup(encoder);
- return ret;
}
static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder.encoder;
- struct drm_connector *connector = &dp->connector;
cancel_work_sync(&dp->event_work);
- cdn_dp_encoder_disable(encoder);
encoder->funcs->destroy(encoder);
- connector->funcs->destroy(connector);
pm_runtime_disable(dev);
if (dp->fw_loaded)
release_firmware(dp->fw);
- drm_edid_free(dp->drm_edid);
- dp->drm_edid = NULL;
}
static const struct component_ops cdn_dp_component_ops = {
@@ -1171,9 +1103,10 @@ static int cdn_dp_probe(struct platform_device *pdev)
int ret;
int i;
- dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(dev, struct cdn_dp_device, bridge,
+ &cdn_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = dev;
match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
@@ -1209,19 +1142,11 @@ static int cdn_dp_probe(struct platform_device *pdev)
mutex_init(&dp->lock);
dev_set_drvdata(dev, dp);
- ret = cdn_dp_audio_codec_init(dp, dev);
- if (ret)
- return ret;
-
ret = component_add(dev, &cdn_dp_component_ops);
if (ret)
- goto err_audio_deinit;
+ return ret;
return 0;
-
-err_audio_deinit:
- platform_device_unregister(dp->audio_pdev);
- return ret;
}
static void cdn_dp_remove(struct platform_device *pdev)
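A hedged sketch of the devm_drm_bridge_alloc() pattern adopted in the probe hunk above: the macro allocates the driver structure with the bridge embedded at the named member, sets the bridge funcs, and ties the lifetime to the device, replacing the bare devm_kzalloc(). Everything except the DRM API names is illustrative:

    #include <linux/err.h>
    #include <drm/drm_bridge.h>

    struct foo_dp {
            struct device *dev;
            struct drm_bridge bridge;       /* embedded, not a pointer */
    };

    static const struct drm_bridge_funcs foo_bridge_funcs = {
            /* detect/edid_read/atomic_enable/... as in the patch above */
    };

    static int foo_probe(struct device *dev)
    {
            struct foo_dp *fdp;

            fdp = devm_drm_bridge_alloc(dev, struct foo_dp, bridge,
                                        &foo_bridge_funcs);
            if (IS_ERR(fdp))
                    return PTR_ERR(fdp);

            fdp->dev = dev;
            return 0;
    }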
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 17498f576ce7..e9c30b9fd543 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -8,6 +8,7 @@
#define _CDN_DP_CORE_H
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <sound/hdmi-codec.h>
@@ -65,12 +66,11 @@ struct cdn_dp_port {
struct cdn_dp_device {
struct device *dev;
struct drm_device *drm_dev;
- struct drm_connector connector;
+ struct drm_bridge bridge;
struct rockchip_encoder encoder;
struct drm_display_mode mode;
struct platform_device *audio_pdev;
struct work_struct event_work;
- const struct drm_edid *drm_edid;
struct mutex lock;
bool connected;
@@ -101,9 +101,5 @@ struct cdn_dp_device {
int active_port;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- bool sink_has_audio;
-
- hdmi_codec_plugged_cb plugged_cb;
- struct device *codec_dev;
};
#endif /* _CDN_DP_CORE_H */
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index f737e7d46e66..acb59b25d928 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -213,17 +213,13 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->ref_clk)) {
ret = PTR_ERR(hdmi->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get reference clock\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get reference clock\n");
}
hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf");
if (IS_ERR(hdmi->grf_clk)) {
ret = PTR_ERR(hdmi->grf_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get grf clock\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get grf clock\n");
}
ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9");
@@ -573,17 +569,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "Unable to parse OF data\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "Unable to parse OF data\n");
}
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get phy\n");
- return ret;
+ return dev_err_probe(hdmi->dev, ret, "failed to get phy\n");
}
if (hdmi->phy) {
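The dev_err_probe() conversions above are behavior-preserving: the helper logs at error level only when the code is not -EPROBE_DEFER (deferred probes are recorded in /sys/kernel/debug/devices_deferred instead) and returns the error, collapsing the open-coded pattern. Roughly, with hypothetical wrappers:

    #include <linux/device.h>
    #include <linux/errno.h>

    /* Before: deferral-aware open coding. */
    static int foo_report_old(struct device *dev, int err)
    {
            if (err != -EPROBE_DEFER)
                    dev_err(dev, "failed to get reference clock\n");
            return err;
    }

    /* After: one call, same return value, plus the deferred-probe note. */
    static int foo_report_new(struct device *dev, int err)
    {
            return dev_err_probe(dev, err, "failed to get reference clock\n");
    }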
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index db4b4038e51d..1ab3ad4bde9e 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -29,11 +29,360 @@
#include "rockchip_drm_drv.h"
-#include "inno_hdmi.h"
+#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+#define DDC_SEGMENT_ADDR 0x30
+
+#define HDMI_SCL_RATE (100 * 1000)
+
+#define DDC_BUS_FREQ_L 0x4b
+#define DDC_BUS_FREQ_H 0x4c
+
+#define HDMI_SYS_CTRL 0x00
+#define m_RST_ANALOG BIT(6)
+#define v_RST_ANALOG (0 << 6)
+#define v_NOT_RST_ANALOG BIT(6)
+#define m_RST_DIGITAL BIT(5)
+#define v_RST_DIGITAL (0 << 5)
+#define v_NOT_RST_DIGITAL BIT(5)
+#define m_REG_CLK_INV BIT(4)
+#define v_REG_CLK_NOT_INV (0 << 4)
+#define v_REG_CLK_INV BIT(4)
+#define m_VCLK_INV BIT(3)
+#define v_VCLK_NOT_INV (0 << 3)
+#define v_VCLK_INV BIT(3)
+#define m_REG_CLK_SOURCE BIT(2)
+#define v_REG_CLK_SOURCE_TMDS (0 << 2)
+#define v_REG_CLK_SOURCE_SYS BIT(2)
+#define m_POWER BIT(1)
+#define v_PWR_ON (0 << 1)
+#define v_PWR_OFF BIT(1)
+#define m_INT_POL BIT(0)
+#define v_INT_POL_HIGH 1
+#define v_INT_POL_LOW 0
+
+#define HDMI_VIDEO_CONTRL1 0x01
+#define m_VIDEO_INPUT_FORMAT (7 << 1)
+#define m_DE_SOURCE BIT(0)
+#define v_VIDEO_INPUT_FORMAT(n) ((n) << 1)
+#define v_DE_EXTERNAL 1
+#define v_DE_INTERNAL 0
+enum {
+ VIDEO_INPUT_SDR_RGB444 = 0,
+ VIDEO_INPUT_DDR_RGB444 = 5,
+ VIDEO_INPUT_DDR_YCBCR422 = 6
+};
-#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+#define HDMI_VIDEO_CONTRL2 0x02
+#define m_VIDEO_OUTPUT_COLOR (3 << 6)
+#define m_VIDEO_INPUT_BITS (3 << 4)
+#define m_VIDEO_INPUT_CSP BIT(0)
+#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
+#define v_VIDEO_INPUT_BITS(n) ((n) << 4)
+#define v_VIDEO_INPUT_CSP(n) ((n) << 0)
+enum {
+ VIDEO_INPUT_12BITS = 0,
+ VIDEO_INPUT_10BITS = 1,
+ VIDEO_INPUT_REVERT = 2,
+ VIDEO_INPUT_8BITS = 3,
+};
+
+#define HDMI_VIDEO_CONTRL 0x03
+#define m_VIDEO_AUTO_CSC BIT(7)
+#define v_VIDEO_AUTO_CSC(n) ((n) << 7)
+#define m_VIDEO_C0_C2_SWAP BIT(0)
+#define v_VIDEO_C0_C2_SWAP(n) ((n) << 0)
+enum {
+ C0_C2_CHANGE_ENABLE = 0,
+ C0_C2_CHANGE_DISABLE = 1,
+ AUTO_CSC_DISABLE = 0,
+ AUTO_CSC_ENABLE = 1,
+};
+
+#define HDMI_VIDEO_CONTRL3 0x04
+#define m_COLOR_DEPTH_NOT_INDICATED BIT(4)
+#define m_SOF BIT(3)
+#define m_COLOR_RANGE BIT(2)
+#define m_CSC BIT(0)
+#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
+#define v_SOF_ENABLE (0 << 3)
+#define v_SOF_DISABLE BIT(3)
+#define v_COLOR_RANGE_FULL BIT(2)
+#define v_COLOR_RANGE_LIMITED (0 << 2)
+#define v_CSC_ENABLE 1
+#define v_CSC_DISABLE 0
+
+#define HDMI_AV_MUTE 0x05
+#define m_AVMUTE_CLEAR BIT(7)
+#define m_AVMUTE_ENABLE BIT(6)
+#define m_AUDIO_MUTE BIT(1)
+#define m_VIDEO_BLACK BIT(0)
+#define v_AVMUTE_CLEAR(n) ((n) << 7)
+#define v_AVMUTE_ENABLE(n) ((n) << 6)
+#define v_AUDIO_MUTE(n) ((n) << 1)
+#define v_VIDEO_MUTE(n) ((n) << 0)
+
+#define HDMI_VIDEO_TIMING_CTL 0x08
+#define v_HSYNC_POLARITY(n) ((n) << 3)
+#define v_VSYNC_POLARITY(n) ((n) << 2)
+#define v_INETLACE(n) ((n) << 1)
+#define v_EXTERANL_VIDEO(n) ((n) << 0)
+
+#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
+#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
+#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
+#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
+#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
+#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
+#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
+#define HDMI_VIDEO_EXT_HDURATION_H 0x10
+#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
+#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
+#define HDMI_VIDEO_EXT_VBLANK 0x13
+#define HDMI_VIDEO_EXT_VDELAY 0x14
+#define HDMI_VIDEO_EXT_VDURATION 0x15
+
+#define HDMI_VIDEO_CSC_COEF 0x18
+
+#define HDMI_AUDIO_CTRL1 0x35
+enum {
+ CTS_SOURCE_INTERNAL = 0,
+ CTS_SOURCE_EXTERNAL = 1,
+};
+
+#define v_CTS_SOURCE(n) ((n) << 7)
+
+enum {
+ DOWNSAMPLE_DISABLE = 0,
+ DOWNSAMPLE_1_2 = 1,
+ DOWNSAMPLE_1_4 = 2,
+};
+
+#define v_DOWN_SAMPLE(n) ((n) << 5)
+
+enum {
+ AUDIO_SOURCE_IIS = 0,
+ AUDIO_SOURCE_SPDIF = 1,
+};
+
+#define v_AUDIO_SOURCE(n) ((n) << 3)
+
+#define v_MCLK_ENABLE(n) ((n) << 2)
+
+enum {
+ MCLK_128FS = 0,
+ MCLK_256FS = 1,
+ MCLK_384FS = 2,
+ MCLK_512FS = 3,
+};
+
+#define v_MCLK_RATIO(n) (n)
+
+#define AUDIO_SAMPLE_RATE 0x37
+
+enum {
+ AUDIO_32K = 0x3,
+ AUDIO_441K = 0x0,
+ AUDIO_48K = 0x2,
+ AUDIO_882K = 0x8,
+ AUDIO_96K = 0xa,
+ AUDIO_1764K = 0xc,
+ AUDIO_192K = 0xe,
+};
+
+#define AUDIO_I2S_MODE 0x38
+
+enum {
+ I2S_CHANNEL_1_2 = 1,
+ I2S_CHANNEL_3_4 = 3,
+ I2S_CHANNEL_5_6 = 7,
+ I2S_CHANNEL_7_8 = 0xf
+};
+
+#define v_I2S_CHANNEL(n) ((n) << 2)
+
+enum {
+ I2S_STANDARD = 0,
+ I2S_LEFT_JUSTIFIED = 1,
+ I2S_RIGHT_JUSTIFIED = 2,
+};
+
+#define v_I2S_MODE(n) (n)
+
+#define AUDIO_I2S_MAP 0x39
+#define AUDIO_I2S_SWAPS_SPDIF 0x3a
+#define v_SPIDF_FREQ(n) (n)
+
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
+#define m_AUDIO_STATUS_NLPCM BIT(7)
+#define m_AUDIO_STATUS_USE BIT(6)
+#define m_AUDIO_STATUS_COPYRIGHT BIT(5)
+#define m_AUDIO_STATUS_ADDITION (3 << 2)
+#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
+#define v_AUDIO_STATUS_NLPCM(n) (((n) & 1) << 7)
+#define AUDIO_N_H 0x3f
+#define AUDIO_N_M 0x40
+#define AUDIO_N_L 0x41
+
+#define HDMI_AUDIO_CTS_H 0x45
+#define HDMI_AUDIO_CTS_M 0x46
+#define HDMI_AUDIO_CTS_L 0x47
+
+#define HDMI_DDC_CLK_L 0x4b
+#define HDMI_DDC_CLK_H 0x4c
+
+#define HDMI_EDID_SEGMENT_POINTER 0x4d
+#define HDMI_EDID_WORD_ADDR 0x4e
+#define HDMI_EDID_FIFO_OFFSET 0x4f
+#define HDMI_EDID_FIFO_ADDR 0x50
+
+#define HDMI_PACKET_SEND_MANUAL 0x9c
+#define HDMI_PACKET_SEND_AUTO 0x9d
+#define m_PACKET_GCP_EN BIT(7)
+#define m_PACKET_MSI_EN BIT(6)
+#define m_PACKET_SDI_EN BIT(5)
+#define m_PACKET_VSI_EN BIT(4)
+#define v_PACKET_GCP_EN(n) (((n) & 1) << 7)
+#define v_PACKET_MSI_EN(n) (((n) & 1) << 6)
+#define v_PACKET_SDI_EN(n) (((n) & 1) << 5)
+#define v_PACKET_VSI_EN(n) (((n) & 1) << 4)
+
+#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
+
+enum {
+ INFOFRAME_VSI = 0x05,
+ INFOFRAME_AVI = 0x06,
+ INFOFRAME_AAI = 0x08,
+};
+
+#define HDMI_CONTROL_PACKET_ADDR 0xa0
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+
+enum {
+ AVI_COLOR_MODE_RGB = 0,
+ AVI_COLOR_MODE_YCBCR422 = 1,
+ AVI_COLOR_MODE_YCBCR444 = 2,
+ AVI_COLORIMETRY_NO_DATA = 0,
+
+ AVI_COLORIMETRY_SMPTE_170M = 1,
+ AVI_COLORIMETRY_ITU709 = 2,
+ AVI_COLORIMETRY_EXTENDED = 3,
+
+ AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
+ AVI_CODED_FRAME_ASPECT_4_3 = 1,
+ AVI_CODED_FRAME_ASPECT_16_9 = 2,
+
+ ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
+ ACTIVE_ASPECT_RATE_4_3 = 0x09,
+ ACTIVE_ASPECT_RATE_16_9 = 0x0A,
+ ACTIVE_ASPECT_RATE_14_9 = 0x0B,
+};
+
+#define HDMI_HDCP_CTRL 0x52
+#define m_HDMI_DVI BIT(1)
+#define v_HDMI_DVI(n) ((n) << 1)
+
+#define HDMI_INTERRUPT_MASK1 0xc0
+#define HDMI_INTERRUPT_STATUS1 0xc1
+#define m_INT_ACTIVE_VSYNC BIT(5)
+#define m_INT_EDID_READY BIT(2)
+
+#define HDMI_INTERRUPT_MASK2 0xc2
+#define HDMI_INTERRUPT_STATUS2 0xc3
+#define m_INT_HDCP_ERR BIT(7)
+#define m_INT_BKSV_FLAG BIT(6)
+#define m_INT_HDCP_OK BIT(4)
+
+#define HDMI_STATUS 0xc8
+#define m_HOTPLUG BIT(7)
+#define m_MASK_INT_HOTPLUG BIT(5)
+#define m_INT_HOTPLUG BIT(1)
+#define v_MASK_INT_HOTPLUG(n) (((n) & 0x1) << 5)
+
+#define HDMI_COLORBAR 0xc9
+
+#define HDMI_PHY_SYNC 0xce
+#define HDMI_PHY_SYS_CTL 0xe0
+#define m_TMDS_CLK_SOURCE BIT(5)
+#define v_TMDS_FROM_PLL (0 << 5)
+#define v_TMDS_FROM_GEN BIT(5)
+#define m_PHASE_CLK BIT(4)
+#define v_DEFAULT_PHASE (0 << 4)
+#define v_SYNC_PHASE BIT(4)
+#define m_TMDS_CURRENT_PWR BIT(3)
+#define v_TURN_ON_CURRENT (0 << 3)
+#define v_CAT_OFF_CURRENT BIT(3)
+#define m_BANDGAP_PWR BIT(2)
+#define v_BANDGAP_PWR_UP (0 << 2)
+#define v_BANDGAP_PWR_DOWN BIT(2)
+#define m_PLL_PWR BIT(1)
+#define v_PLL_PWR_UP (0 << 1)
+#define v_PLL_PWR_DOWN BIT(1)
+#define m_TMDS_CHG_PWR BIT(0)
+#define v_TMDS_CHG_PWR_UP (0 << 0)
+#define v_TMDS_CHG_PWR_DOWN BIT(0)
+
+#define HDMI_PHY_CHG_PWR 0xe1
+#define v_CLK_CHG_PWR(n) (((n) & 1) << 3)
+#define v_DATA_CHG_PWR(n) (((n) & 7) << 0)
+
+#define HDMI_PHY_DRIVER 0xe2
+#define v_CLK_MAIN_DRIVER(n) ((n) << 4)
+#define v_DATA_MAIN_DRIVER(n) ((n) << 0)
+
+#define HDMI_PHY_PRE_EMPHASIS 0xe3
+#define v_PRE_EMPHASIS(n) (((n) & 7) << 4)
+#define v_CLK_PRE_DRIVER(n) (((n) & 3) << 2)
+#define v_DATA_PRE_DRIVER(n) (((n) & 3) << 0)
+
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
+#define v_FEEDBACK_DIV_LOW(n) ((n) & 0xff)
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
+#define v_FEEDBACK_DIV_HIGH(n) ((n) & 1)
+
+#define HDMI_PHY_PRE_DIV_RATIO 0xed
+#define v_PRE_DIV_RATIO(n) ((n) & 0x1f)
+
+#define HDMI_CEC_CTRL 0xd0
+#define m_ADJUST_FOR_HISENSE BIT(6)
+#define m_REJECT_RX_BROADCAST BIT(5)
+#define m_BUSFREETIME_ENABLE BIT(2)
+#define m_REJECT_RX BIT(1)
+#define m_START_TX BIT(0)
+
+#define HDMI_CEC_DATA 0xd1
+#define HDMI_CEC_TX_OFFSET 0xd2
+#define HDMI_CEC_RX_OFFSET 0xd3
+#define HDMI_CEC_CLK_H 0xd4
+#define HDMI_CEC_CLK_L 0xd5
+#define HDMI_CEC_TX_LENGTH 0xd6
+#define HDMI_CEC_RX_LENGTH 0xd7
+#define HDMI_CEC_TX_INT_MASK 0xd8
+#define m_TX_DONE BIT(3)
+#define m_TX_NOACK BIT(2)
+#define m_TX_BROADCAST_REJ BIT(1)
+#define m_TX_BUSNOTFREE BIT(0)
+
+#define HDMI_CEC_RX_INT_MASK 0xd9
+#define m_RX_LA_ERR BIT(4)
+#define m_RX_GLITCH BIT(3)
+#define m_RX_DONE BIT(0)
+
+#define HDMI_CEC_TX_INT 0xda
+#define HDMI_CEC_RX_INT 0xdb
+#define HDMI_CEC_BUSFREETIME_L 0xdc
+#define HDMI_CEC_BUSFREETIME_H 0xdd
+#define HDMI_CEC_LOGICADDR 0xde
+
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define RK3036_GRF_SOC_CON2 0x148
#define RK3036_HDMI_PHSYNC BIT(4)
@@ -255,22 +604,37 @@ static void inno_hdmi_power_up(struct inno_hdmi *hdmi,
inno_hdmi_sys_power(hdmi, true);
};
-static void inno_hdmi_reset(struct inno_hdmi *hdmi)
+static void inno_hdmi_init_hw(struct inno_hdmi *hdmi)
{
u32 val;
u32 msk;
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL);
- udelay(100);
+ usleep_range(100, 150);
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG);
- udelay(100);
+ usleep_range(100, 150);
msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL;
val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
inno_hdmi_standby(hdmi);
+
+ /*
+ * When the controller isn't configured with an accurate video
+ * timing and no reference clock is available, the TMDS clock
+ * source is switched to PCLK_HDMI, so initialize the TMDS rate
+ * to the PCLK rate and reconfigure the DDC clock.
+ */
+ if (hdmi->refclk)
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
+ else
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
+
+ /* Unmute hotplug interrupt */
+ hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
}
static int inno_hdmi_disable_frame(struct drm_connector *connector,
@@ -775,8 +1139,7 @@ static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
* we assume that each word write to this i2c adapter
* should be the offset of EDID word address.
*/
- if ((msgs->len != 1) ||
- ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR)))
+ if (msgs->len != 1 || (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
return -EINVAL;
reinit_completion(&hdmi->i2c->cmp);
@@ -867,10 +1230,9 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
strscpy(adap->name, "Inno HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
- devm_kfree(hdmi->dev, i2c);
return ERR_PTR(ret);
}
@@ -907,71 +1269,37 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
- hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
+ hdmi->pclk = devm_clk_get_enabled(hdmi->dev, "pclk");
if (IS_ERR(hdmi->pclk))
return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n");
- ret = clk_prepare_enable(hdmi->pclk);
- if (ret)
- return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret);
-
- hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
- if (IS_ERR(hdmi->refclk)) {
- ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
- goto err_disable_pclk;
- }
-
- ret = clk_prepare_enable(hdmi->refclk);
- if (ret) {
- ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", ret);
- goto err_disable_pclk;
- }
+ hdmi->refclk = devm_clk_get_optional_enabled(hdmi->dev, "ref");
+ if (IS_ERR(hdmi->refclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
if (hdmi->variant->dev_type == RK3036_HDMI) {
hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
- if (IS_ERR(hdmi->grf)) {
- ret = dev_err_probe(dev, PTR_ERR(hdmi->grf),
- "Unable to get rockchip,grf\n");
- goto err_disable_clk;
- }
+ if (IS_ERR(hdmi->grf))
+ return dev_err_probe(dev,
+ PTR_ERR(hdmi->grf), "Unable to get rockchip,grf\n");
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_disable_clk;
- }
+ if (irq < 0)
+ return irq;
- inno_hdmi_reset(hdmi);
+ inno_hdmi_init_hw(hdmi);
hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_clk;
- }
-
- /*
- * When the controller isn't configured to an accurate
- * video timing and there is no reference clock available,
- * then the TMDS clock source would be switched to PCLK_HDMI,
- * so we need to init the TMDS rate to PCLK rate, and
- * reconfigure the DDC clock.
- */
- if (hdmi->refclk)
- inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
- else
- inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
+ if (IS_ERR(hdmi->ddc))
+ return PTR_ERR(hdmi->ddc);
ret = inno_hdmi_register(drm, hdmi);
if (ret)
- goto err_put_adapter;
+ return ret;
dev_set_drvdata(dev, hdmi);
- /* Unmute hotplug interrupt */
- hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
-
ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
inno_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
@@ -982,12 +1310,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
err_cleanup_hdmi:
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_put_adapter:
- i2c_put_adapter(hdmi->ddc);
-err_disable_clk:
- clk_disable_unprepare(hdmi->refclk);
-err_disable_pclk:
- clk_disable_unprepare(hdmi->pclk);
return ret;
}
@@ -998,10 +1320,6 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master,
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-
- i2c_put_adapter(hdmi->ddc);
- clk_disable_unprepare(hdmi->refclk);
- clk_disable_unprepare(hdmi->pclk);
}
static const struct component_ops inno_hdmi_ops = {
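The devm conversions in this bind path (devm_clk_get_enabled(), devm_clk_get_optional_enabled(), devm_i2c_add_adapter()) are what let every error path collapse to a plain return: clock disable/unprepare and adapter removal become devres actions run on probe failure or unbind. A condensed before/after sketch for one clock, with hypothetical names:

    #include <linux/clk.h>
    #include <linux/err.h>

    /* Before: get + prepare_enable, with a matching disable on every
     * error path and again in unbind. */
    static int foo_clk_old(struct device *dev, struct clk **out)
    {
            struct clk *pclk = devm_clk_get(dev, "pclk");
            int ret;

            if (IS_ERR(pclk))
                    return PTR_ERR(pclk);
            ret = clk_prepare_enable(pclk);
            if (ret)
                    return ret;
            *out = pclk;    /* caller must clk_disable_unprepare() later */
            return 0;
    }

    /* After: one call; the disable/unprepare is a devres action. */
    static int foo_clk_new(struct device *dev, struct clk **out)
    {
            struct clk *pclk = devm_clk_get_enabled(dev, "pclk");

            if (IS_ERR(pclk))
                    return PTR_ERR(pclk);
            *out = pclk;
            return 0;
    }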
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
deleted file mode 100644
index 8b7ef3fac485..000000000000
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Rockchip Electronics Co., Ltd.
- * Zheng Yang <zhengyang@rock-chips.com>
- * Yakir Yang <ykk@rock-chips.com>
- */
-
-#ifndef __INNO_HDMI_H__
-#define __INNO_HDMI_H__
-
-#define DDC_SEGMENT_ADDR 0x30
-
-#define HDMI_SCL_RATE (100*1000)
-#define DDC_BUS_FREQ_L 0x4b
-#define DDC_BUS_FREQ_H 0x4c
-
-#define HDMI_SYS_CTRL 0x00
-#define m_RST_ANALOG (1 << 6)
-#define v_RST_ANALOG (0 << 6)
-#define v_NOT_RST_ANALOG (1 << 6)
-#define m_RST_DIGITAL (1 << 5)
-#define v_RST_DIGITAL (0 << 5)
-#define v_NOT_RST_DIGITAL (1 << 5)
-#define m_REG_CLK_INV (1 << 4)
-#define v_REG_CLK_NOT_INV (0 << 4)
-#define v_REG_CLK_INV (1 << 4)
-#define m_VCLK_INV (1 << 3)
-#define v_VCLK_NOT_INV (0 << 3)
-#define v_VCLK_INV (1 << 3)
-#define m_REG_CLK_SOURCE (1 << 2)
-#define v_REG_CLK_SOURCE_TMDS (0 << 2)
-#define v_REG_CLK_SOURCE_SYS (1 << 2)
-#define m_POWER (1 << 1)
-#define v_PWR_ON (0 << 1)
-#define v_PWR_OFF (1 << 1)
-#define m_INT_POL (1 << 0)
-#define v_INT_POL_HIGH 1
-#define v_INT_POL_LOW 0
-
-#define HDMI_VIDEO_CONTRL1 0x01
-#define m_VIDEO_INPUT_FORMAT (7 << 1)
-#define m_DE_SOURCE (1 << 0)
-#define v_VIDEO_INPUT_FORMAT(n) (n << 1)
-#define v_DE_EXTERNAL 1
-#define v_DE_INTERNAL 0
-enum {
- VIDEO_INPUT_SDR_RGB444 = 0,
- VIDEO_INPUT_DDR_RGB444 = 5,
- VIDEO_INPUT_DDR_YCBCR422 = 6
-};
-
-#define HDMI_VIDEO_CONTRL2 0x02
-#define m_VIDEO_OUTPUT_COLOR (3 << 6)
-#define m_VIDEO_INPUT_BITS (3 << 4)
-#define m_VIDEO_INPUT_CSP (1 << 0)
-#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
-#define v_VIDEO_INPUT_BITS(n) (n << 4)
-#define v_VIDEO_INPUT_CSP(n) (n << 0)
-enum {
- VIDEO_INPUT_12BITS = 0,
- VIDEO_INPUT_10BITS = 1,
- VIDEO_INPUT_REVERT = 2,
- VIDEO_INPUT_8BITS = 3,
-};
-
-#define HDMI_VIDEO_CONTRL 0x03
-#define m_VIDEO_AUTO_CSC (1 << 7)
-#define v_VIDEO_AUTO_CSC(n) (n << 7)
-#define m_VIDEO_C0_C2_SWAP (1 << 0)
-#define v_VIDEO_C0_C2_SWAP(n) (n << 0)
-enum {
- C0_C2_CHANGE_ENABLE = 0,
- C0_C2_CHANGE_DISABLE = 1,
- AUTO_CSC_DISABLE = 0,
- AUTO_CSC_ENABLE = 1,
-};
-
-#define HDMI_VIDEO_CONTRL3 0x04
-#define m_COLOR_DEPTH_NOT_INDICATED (1 << 4)
-#define m_SOF (1 << 3)
-#define m_COLOR_RANGE (1 << 2)
-#define m_CSC (1 << 0)
-#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
-#define v_SOF_ENABLE (0 << 3)
-#define v_SOF_DISABLE (1 << 3)
-#define v_COLOR_RANGE_FULL (1 << 2)
-#define v_COLOR_RANGE_LIMITED (0 << 2)
-#define v_CSC_ENABLE 1
-#define v_CSC_DISABLE 0
-
-#define HDMI_AV_MUTE 0x05
-#define m_AVMUTE_CLEAR (1 << 7)
-#define m_AVMUTE_ENABLE (1 << 6)
-#define m_AUDIO_MUTE (1 << 1)
-#define m_VIDEO_BLACK (1 << 0)
-#define v_AVMUTE_CLEAR(n) (n << 7)
-#define v_AVMUTE_ENABLE(n) (n << 6)
-#define v_AUDIO_MUTE(n) (n << 1)
-#define v_VIDEO_MUTE(n) (n << 0)
-
-#define HDMI_VIDEO_TIMING_CTL 0x08
-#define v_HSYNC_POLARITY(n) (n << 3)
-#define v_VSYNC_POLARITY(n) (n << 2)
-#define v_INETLACE(n) (n << 1)
-#define v_EXTERANL_VIDEO(n) (n << 0)
-
-#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
-#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
-#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
-#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
-#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
-#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
-#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
-#define HDMI_VIDEO_EXT_HDURATION_H 0x10
-#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
-#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
-#define HDMI_VIDEO_EXT_VBLANK 0x13
-#define HDMI_VIDEO_EXT_VDELAY 0x14
-#define HDMI_VIDEO_EXT_VDURATION 0x15
-
-#define HDMI_VIDEO_CSC_COEF 0x18
-
-#define HDMI_AUDIO_CTRL1 0x35
-enum {
- CTS_SOURCE_INTERNAL = 0,
- CTS_SOURCE_EXTERNAL = 1,
-};
-#define v_CTS_SOURCE(n) (n << 7)
-
-enum {
- DOWNSAMPLE_DISABLE = 0,
- DOWNSAMPLE_1_2 = 1,
- DOWNSAMPLE_1_4 = 2,
-};
-#define v_DOWN_SAMPLE(n) (n << 5)
-
-enum {
- AUDIO_SOURCE_IIS = 0,
- AUDIO_SOURCE_SPDIF = 1,
-};
-#define v_AUDIO_SOURCE(n) (n << 3)
-
-#define v_MCLK_ENABLE(n) (n << 2)
-enum {
- MCLK_128FS = 0,
- MCLK_256FS = 1,
- MCLK_384FS = 2,
- MCLK_512FS = 3,
-};
-#define v_MCLK_RATIO(n) (n)
-
-#define AUDIO_SAMPLE_RATE 0x37
-enum {
- AUDIO_32K = 0x3,
- AUDIO_441K = 0x0,
- AUDIO_48K = 0x2,
- AUDIO_882K = 0x8,
- AUDIO_96K = 0xa,
- AUDIO_1764K = 0xc,
- AUDIO_192K = 0xe,
-};
-
-#define AUDIO_I2S_MODE 0x38
-enum {
- I2S_CHANNEL_1_2 = 1,
- I2S_CHANNEL_3_4 = 3,
- I2S_CHANNEL_5_6 = 7,
- I2S_CHANNEL_7_8 = 0xf
-};
-#define v_I2S_CHANNEL(n) ((n) << 2)
-enum {
- I2S_STANDARD = 0,
- I2S_LEFT_JUSTIFIED = 1,
- I2S_RIGHT_JUSTIFIED = 2,
-};
-#define v_I2S_MODE(n) (n)
-
-#define AUDIO_I2S_MAP 0x39
-#define AUDIO_I2S_SWAPS_SPDIF 0x3a
-#define v_SPIDF_FREQ(n) (n)
-
-#define N_32K 0x1000
-#define N_441K 0x1880
-#define N_882K 0x3100
-#define N_1764K 0x6200
-#define N_48K 0x1800
-#define N_96K 0x3000
-#define N_192K 0x6000
-
-#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
-#define m_AUDIO_STATUS_NLPCM (1 << 7)
-#define m_AUDIO_STATUS_USE (1 << 6)
-#define m_AUDIO_STATUS_COPYRIGHT (1 << 5)
-#define m_AUDIO_STATUS_ADDITION (3 << 2)
-#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
-#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7)
-#define AUDIO_N_H 0x3f
-#define AUDIO_N_M 0x40
-#define AUDIO_N_L 0x41
-
-#define HDMI_AUDIO_CTS_H 0x45
-#define HDMI_AUDIO_CTS_M 0x46
-#define HDMI_AUDIO_CTS_L 0x47
-
-#define HDMI_DDC_CLK_L 0x4b
-#define HDMI_DDC_CLK_H 0x4c
-
-#define HDMI_EDID_SEGMENT_POINTER 0x4d
-#define HDMI_EDID_WORD_ADDR 0x4e
-#define HDMI_EDID_FIFO_OFFSET 0x4f
-#define HDMI_EDID_FIFO_ADDR 0x50
-
-#define HDMI_PACKET_SEND_MANUAL 0x9c
-#define HDMI_PACKET_SEND_AUTO 0x9d
-#define m_PACKET_GCP_EN (1 << 7)
-#define m_PACKET_MSI_EN (1 << 6)
-#define m_PACKET_SDI_EN (1 << 5)
-#define m_PACKET_VSI_EN (1 << 4)
-#define v_PACKET_GCP_EN(n) ((n & 1) << 7)
-#define v_PACKET_MSI_EN(n) ((n & 1) << 6)
-#define v_PACKET_SDI_EN(n) ((n & 1) << 5)
-#define v_PACKET_VSI_EN(n) ((n & 1) << 4)
-
-#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
-enum {
- INFOFRAME_VSI = 0x05,
- INFOFRAME_AVI = 0x06,
- INFOFRAME_AAI = 0x08,
-};
-
-#define HDMI_CONTROL_PACKET_ADDR 0xa0
-#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
-enum {
- AVI_COLOR_MODE_RGB = 0,
- AVI_COLOR_MODE_YCBCR422 = 1,
- AVI_COLOR_MODE_YCBCR444 = 2,
- AVI_COLORIMETRY_NO_DATA = 0,
-
- AVI_COLORIMETRY_SMPTE_170M = 1,
- AVI_COLORIMETRY_ITU709 = 2,
- AVI_COLORIMETRY_EXTENDED = 3,
-
- AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
- AVI_CODED_FRAME_ASPECT_4_3 = 1,
- AVI_CODED_FRAME_ASPECT_16_9 = 2,
-
- ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
- ACTIVE_ASPECT_RATE_4_3 = 0x09,
- ACTIVE_ASPECT_RATE_16_9 = 0x0A,
- ACTIVE_ASPECT_RATE_14_9 = 0x0B,
-};
-
-#define HDMI_HDCP_CTRL 0x52
-#define m_HDMI_DVI (1 << 1)
-#define v_HDMI_DVI(n) (n << 1)
-
-#define HDMI_INTERRUPT_MASK1 0xc0
-#define HDMI_INTERRUPT_STATUS1 0xc1
-#define m_INT_ACTIVE_VSYNC (1 << 5)
-#define m_INT_EDID_READY (1 << 2)
-
-#define HDMI_INTERRUPT_MASK2 0xc2
-#define HDMI_INTERRUPT_STATUS2 0xc3
-#define m_INT_HDCP_ERR (1 << 7)
-#define m_INT_BKSV_FLAG (1 << 6)
-#define m_INT_HDCP_OK (1 << 4)
-
-#define HDMI_STATUS 0xc8
-#define m_HOTPLUG (1 << 7)
-#define m_MASK_INT_HOTPLUG (1 << 5)
-#define m_INT_HOTPLUG (1 << 1)
-#define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5)
-
-#define HDMI_COLORBAR 0xc9
-
-#define HDMI_PHY_SYNC 0xce
-#define HDMI_PHY_SYS_CTL 0xe0
-#define m_TMDS_CLK_SOURCE (1 << 5)
-#define v_TMDS_FROM_PLL (0 << 5)
-#define v_TMDS_FROM_GEN (1 << 5)
-#define m_PHASE_CLK (1 << 4)
-#define v_DEFAULT_PHASE (0 << 4)
-#define v_SYNC_PHASE (1 << 4)
-#define m_TMDS_CURRENT_PWR (1 << 3)
-#define v_TURN_ON_CURRENT (0 << 3)
-#define v_CAT_OFF_CURRENT (1 << 3)
-#define m_BANDGAP_PWR (1 << 2)
-#define v_BANDGAP_PWR_UP (0 << 2)
-#define v_BANDGAP_PWR_DOWN (1 << 2)
-#define m_PLL_PWR (1 << 1)
-#define v_PLL_PWR_UP (0 << 1)
-#define v_PLL_PWR_DOWN (1 << 1)
-#define m_TMDS_CHG_PWR (1 << 0)
-#define v_TMDS_CHG_PWR_UP (0 << 0)
-#define v_TMDS_CHG_PWR_DOWN (1 << 0)
-
-#define HDMI_PHY_CHG_PWR 0xe1
-#define v_CLK_CHG_PWR(n) ((n & 1) << 3)
-#define v_DATA_CHG_PWR(n) ((n & 7) << 0)
-
-#define HDMI_PHY_DRIVER 0xe2
-#define v_CLK_MAIN_DRIVER(n) (n << 4)
-#define v_DATA_MAIN_DRIVER(n) (n << 0)
-
-#define HDMI_PHY_PRE_EMPHASIS 0xe3
-#define v_PRE_EMPHASIS(n) ((n & 7) << 4)
-#define v_CLK_PRE_DRIVER(n) ((n & 3) << 2)
-#define v_DATA_PRE_DRIVER(n) ((n & 3) << 0)
-
-#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
-#define v_FEEDBACK_DIV_LOW(n) (n & 0xff)
-#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
-#define v_FEEDBACK_DIV_HIGH(n) (n & 1)
-
-#define HDMI_PHY_PRE_DIV_RATIO 0xed
-#define v_PRE_DIV_RATIO(n) (n & 0x1f)
-
-#define HDMI_CEC_CTRL 0xd0
-#define m_ADJUST_FOR_HISENSE (1 << 6)
-#define m_REJECT_RX_BROADCAST (1 << 5)
-#define m_BUSFREETIME_ENABLE (1 << 2)
-#define m_REJECT_RX (1 << 1)
-#define m_START_TX (1 << 0)
-
-#define HDMI_CEC_DATA 0xd1
-#define HDMI_CEC_TX_OFFSET 0xd2
-#define HDMI_CEC_RX_OFFSET 0xd3
-#define HDMI_CEC_CLK_H 0xd4
-#define HDMI_CEC_CLK_L 0xd5
-#define HDMI_CEC_TX_LENGTH 0xd6
-#define HDMI_CEC_RX_LENGTH 0xd7
-#define HDMI_CEC_TX_INT_MASK 0xd8
-#define m_TX_DONE (1 << 3)
-#define m_TX_NOACK (1 << 2)
-#define m_TX_BROADCAST_REJ (1 << 1)
-#define m_TX_BUSNOTFREE (1 << 0)
-
-#define HDMI_CEC_RX_INT_MASK 0xd9
-#define m_RX_LA_ERR (1 << 4)
-#define m_RX_GLITCH (1 << 3)
-#define m_RX_DONE (1 << 0)
-
-#define HDMI_CEC_TX_INT 0xda
-#define HDMI_CEC_RX_INT 0xdb
-#define HDMI_CEC_BUSFREETIME_L 0xdc
-#define HDMI_CEC_BUSFREETIME_H 0xdd
-#define HDMI_CEC_LOGICADDR 0xde
-
-#endif /* __INNO_HDMI_H__ */
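Besides moving the register block from this deleted header into inno_hdmi.c, the diff converts the open-coded shifts to the <linux/bits.h> macros. The mapping is mechanical; a few compile-time checks illustrating the equivalences (assuming the kernel's static_assert from <linux/build_bug.h>):

    #include <linux/bits.h>
    #include <linux/build_bug.h>

    static_assert(BIT(5) == (1 << 5));          /* e.g. m_RST_DIGITAL */
    static_assert(GENMASK(5, 4) == (3 << 4));   /* e.g. m_VIDEO_INPUT_BITS */
    static_assert(GENMASK(7, 0) == 0xff);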
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index e7875b52f298..ae4a5ac2299a 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -450,7 +450,7 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
};
static enum drm_connector_status
-rk3066_hdmi_bridge_detect(struct drm_bridge *bridge)
+rk3066_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index dcc1f07632c3..2f469d370021 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -30,21 +30,18 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
static struct drm_framebuffer *
rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_afbc_framebuffer *afbc_fb;
- const struct drm_format_info *info;
int ret;
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
if (!afbc_fb)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+ ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base,
+ file, info, mode_cmd,
&rockchip_drm_fb_funcs);
if (ret) {
kfree(afbc_fb);
@@ -52,16 +49,9 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
}
if (drm_is_afbc(mode_cmd->modifier[0])) {
- int ret, i;
-
- ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb);
if (ret) {
- struct drm_gem_object **obj = afbc_fb->base.obj;
-
- for (i = 0; i < info->num_planes; ++i)
- drm_gem_object_put(obj[i]);
-
- kfree(afbc_fb);
+ drm_framebuffer_put(&afbc_fb->base);
return ERR_PTR(ret);
}
}
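The error-path simplification above relies on refcounting: once drm_gem_fb_init_with_funcs() has succeeded, the framebuffer holds the references on its GEM objects, so a failing drm_gem_fb_afbc_init() is unwound by a single drm_framebuffer_put(), whose destroy callback (typically drm_gem_fb_destroy()) drops the per-plane object references that were previously put by hand. Sketch:

    #include <drm/drm_framebuffer.h>

    /* Dropping the last reference triggers fb->funcs->destroy(). */
    static void foo_fb_unwind(struct drm_framebuffer *fb)
    {
            drm_framebuffer_put(fb);
    }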
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index d0f5fea15e21..186f6452a7d3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -146,25 +146,6 @@ static void vop2_unlock(struct vop2 *vop2)
mutex_unlock(&vop2->vop2_lock);
}
-/*
- * Note:
- * The write mask function is documented but missing on rk3566/8, writes
- * to these bits have no effect. For newer soc(rk3588 and following) the
- * write mask is needed for register writes.
- *
- * GLB_CFG_DONE_EN has no write mask bit.
- *
- */
-static void vop2_cfg_done(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
-
- val |= BIT(vp->id) | (BIT(vp->id) << 16);
-
- regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
-}
-
static void vop2_win_disable(struct vop2_win *win)
{
vop2_win_write(win, VOP2_WIN_ENABLE, 0);
@@ -854,6 +835,11 @@ static void vop2_enable(struct vop2 *vop2)
if (vop2->version == VOP_VERSION_RK3588)
rk3588_vop2_power_domain_enable_all(vop2);
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ vop2->old_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ vop2->old_port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ }
+
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
/*
@@ -2422,6 +2408,10 @@ static int vop2_create_crtcs(struct vop2 *vop2)
break;
}
}
+
+ if (!vp->primary_plane)
+ return dev_err_probe(drm->dev, -ENOENT,
+ "no primary plane for vp %d\n", i);
}
/* Register all unused window as overlay plane */
@@ -2724,6 +2714,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n");
mutex_init(&vop2->vop2_lock);
+ mutex_init(&vop2->ovl_lock);
ret = devm_request_irq(dev, vop2->irq, vop2_isr, IRQF_SHARED, dev_name(dev), vop2);
if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index fc3ecb9fcd95..fa5c56f16047 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -334,6 +334,19 @@ struct vop2 {
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
+ /*
+ * Used to record the layer selection configuration on rk356x/rk3588,
+ * as the RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL registers are
+ * shared by all the Video Ports.
+ */
+ u32 old_layer_sel;
+ u32 old_port_sel;
+ /*
+ * Ensure that updates to these two registers (RK3568_OVL_LAYER_SEL/RK3568_OVL_PORT_SEL)
+ * take effect in sequence.
+ */
+ struct mutex ovl_lock;
+
/* must be put at the end of the struct */
struct vop2_win win[];
};
@@ -727,6 +740,7 @@ enum dst_factor_mode {
#define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20)
#define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18)
#define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16)
+#define RK3588_OVL_PORT_SET__PORT3_MUX GENMASK(15, 12)
#define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8)
#define RK3568_OVL_PORT_SET__PORT1_MUX GENMASK(7, 4)
#define RK3568_OVL_PORT_SET__PORT0_MUX GENMASK(3, 0)
@@ -831,4 +845,23 @@ static inline struct vop2_win *to_vop2_win(struct drm_plane *p)
return container_of(p, struct vop2_win, base);
}
+/*
+ * Note:
+ * The write mask function is documented but missing on rk3566/8; writes
+ * to these bits have no effect. For newer SoCs (rk3588 and following),
+ * the write mask is needed for register writes.
+ *
+ * GLB_CFG_DONE_EN has no write mask bit.
+ */
+static inline void vop2_cfg_done(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
+
+ val |= BIT(vp->id) | (BIT(vp->id) << 16);
+
+ regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
+}
+
#endif /* _ROCKCHIP_DRM_VOP2_H */
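A worked example of the write-mask encoding in vop2_cfg_done() above, for vp->id == 1: the config-done request sits in the low half-word and its write-enable bit is 16 positions higher. The helper below is illustrative only:

    #include <linux/bits.h>

    static inline u32 vop2_cfg_done_val_for_vp1(void)
    {
            /* BIT(1) = 0x2 (VP1 done request); BIT(1) << 16 = 0x20000
             * (its write mask, honoured on rk3588 and later). */
            return RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN |
                   BIT(1) | (BIT(1) << 16);
    }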
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index a673779de3d2..2411260db51d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -56,14 +56,13 @@ struct rockchip_lvds {
struct drm_device *drm_dev;
struct drm_panel *panel;
struct drm_bridge *bridge;
- struct drm_connector connector;
struct rockchip_encoder encoder;
struct dev_pin_info *pins;
};
-static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *connector)
+static inline struct rockchip_lvds *bridge_to_lvds(struct drm_bridge *bridge)
{
- return container_of(connector, struct rockchip_lvds, connector);
+ return (struct rockchip_lvds *)bridge->driver_private;
}
static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *encoder)
@@ -106,25 +105,21 @@ static inline int rockchip_lvds_name_to_output(const char *s)
return -EINVAL;
}
-static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+static int
+rockchip_lvds_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rockchip_lvds *lvds = connector_to_lvds(connector);
+ struct rockchip_lvds *lvds = bridge_to_lvds(bridge);
struct drm_panel *panel = lvds->panel;
return drm_panel_get_modes(panel, connector);
}
static const
-struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
- .get_modes = rockchip_lvds_connector_get_modes,
+struct drm_bridge_funcs rockchip_lvds_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .get_modes = rockchip_lvds_bridge_get_modes,
};
static int
@@ -606,26 +601,23 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
- connector = &lvds->connector;
if (lvds->panel) {
- connector->dpms = DRM_MODE_DPMS_OFF;
- ret = drm_connector_init(drm_dev, connector,
- &rockchip_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret < 0) {
- drm_err(drm_dev,
- "failed to initialize connector: %d\n", ret);
+ lvds->bridge = drm_panel_bridge_add_typed(lvds->panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds->bridge)) {
+ ret = PTR_ERR(lvds->bridge);
goto err_free_encoder;
}
+ }
- drm_connector_helper_add(connector,
- &rockchip_lvds_connector_helper_funcs);
- } else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (lvds->bridge) {
+ lvds->bridge->driver_private = lvds;
+ lvds->bridge->ops = DRM_BRIDGE_OP_MODES;
+ lvds->bridge->funcs = &rockchip_lvds_bridge_funcs;
+
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
- goto err_free_encoder;
+ goto err_free_bridge;
connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
if (IS_ERR(connector)) {
@@ -633,14 +625,14 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
"failed to initialize bridge connector: %pe\n",
connector);
ret = PTR_ERR(connector);
- goto err_free_encoder;
+ goto err_free_bridge;
}
- }
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
- goto err_free_connector;
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
+ goto err_free_bridge;
+ }
}
pm_runtime_enable(dev);
@@ -649,8 +641,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
return 0;
-err_free_connector:
- drm_connector_cleanup(connector);
+err_free_bridge:
+ drm_panel_bridge_remove(lvds->bridge);
err_free_encoder:
drm_encoder_cleanup(encoder);
err_put_remote:
@@ -670,8 +662,6 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
encoder_funcs = lvds->soc_data->helper_funcs;
encoder_funcs->disable(&lvds->encoder.encoder);
pm_runtime_disable(dev);
- drm_connector_cleanup(&lvds->connector);
- drm_encoder_cleanup(&lvds->encoder.encoder);
}
static const struct component_ops rockchip_lvds_component_ops = {
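A hedged sketch of the panel-bridge pattern this file converts to: drm_panel_bridge_add_typed() wraps a drm_panel in a drm_bridge so the generic attach path plus drm_bridge_connector_init() can serve modes from the panel (this driver additionally overrides the wrapper's funcs, which is unusual). Names other than the DRM API are illustrative:

    #include <drm/drm_bridge.h>
    #include <drm/drm_bridge_connector.h>
    #include <drm/drm_connector.h>
    #include <drm/drm_panel.h>

    static int foo_attach_panel(struct drm_device *drm,
                                struct drm_encoder *encoder,
                                struct drm_panel *panel)
    {
            struct drm_connector *connector;
            struct drm_bridge *bridge;
            int ret;

            bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_LVDS);
            if (IS_ERR(bridge))
                    return PTR_ERR(bridge);

            ret = drm_bridge_attach(encoder, bridge, NULL,
                                    DRM_BRIDGE_ATTACH_NO_CONNECTOR);
            if (ret)
                    return ret;

            connector = drm_bridge_connector_init(drm, encoder);
            if (IS_ERR(connector))
                    return PTR_ERR(connector);

            return drm_connector_attach_encoder(connector, encoder);
    }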
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 32c4ed685739..45c5e3987813 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2052,12 +2052,55 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
}
}
+static u32 rk3568_vop2_read_port_mux(struct vop2 *vop2)
+{
+ return vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+}
+
+static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
+{
+ u32 port_mux_sel;
+ int ret;
+
+ /*
+ * Spin until the previous port_mux configuration is done.
+ */
+ ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
+ port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
+ port_mux_sel, vop2->old_port_sel);
+}
+
+static u32 rk3568_vop2_read_layer_cfg(struct vop2 *vop2)
+{
+ return vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+}
+
+static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
+{
+ u32 atv_layer_cfg;
+ int ret;
+
+ /*
+ * Spin until the previous layer configuration is done.
+ */
+ ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
+ atv_layer_cfg == cfg, 0, 50 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
+ atv_layer_cfg, cfg);
+}
+
static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
{
struct vop2 *vop2 = vp->vop2;
struct drm_plane *plane;
u32 layer_sel = 0;
u32 port_sel;
+ u32 old_layer_sel = 0;
+ u32 atv_layer_sel = 0;
+ u32 old_port_sel = 0;
u8 layer_id;
u8 old_layer_id;
u8 layer_sel_id;
@@ -2069,19 +2112,18 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct vop2_video_port *vp2 = &vop2->vps[2];
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+ mutex_lock(&vop2->ovl_lock);
ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL;
- ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
if (vcstate->yuv_overlay)
ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
else
ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
- vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
-
- port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ old_port_sel = vop2->old_port_sel;
+ port_sel = old_port_sel;
port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
if (vp0->nlayers)
@@ -2102,7 +2144,13 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
else
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
- layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ /* Fixed value for rk3588 */
+ if (vop2->version == VOP_VERSION_RK3588)
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SET__PORT3_MUX, 7);
+
+ atv_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ old_layer_sel = vop2->old_layer_sel;
+ layer_sel = old_layer_sel;
ofs = 0;
for (i = 0; i < vp->id; i++)
@@ -2186,8 +2234,37 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
old_win->data->layer_sel_id[vp->id]);
}
+ vop2->old_layer_sel = layer_sel;
+ vop2->old_port_sel = port_sel;
+ /*
+ * As RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL are shared by all Video
+ * Ports and their configuration takes effect on one Video Port's vsync,
+ * two things must be observed when migrating layers or changing the zpos
+ * of layers:
+ * 1. When a layer is migrated from one VP to another, the configuration
+ * of the layer can only take effect after the port mux configuration
+ * is enabled.
+ *
+ * 2. When changing the zpos of layers, ensure that the change for the
+ * previous VP takes effect before proceeding to the next VP. Otherwise,
+ * the new configuration might overwrite the previous one for the
+ * previous VP, or the previous VP's configuration could take effect
+ * along with the VSYNC of the new VP.
+ */
+ if (layer_sel != old_layer_sel || port_sel != old_port_sel)
+ ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
+
+ if (port_sel != old_port_sel) {
+ vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+ vop2_cfg_done(vp);
+ rk3568_vop2_wait_for_port_mux_done(vop2);
+ }
+
+ if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
+ rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
- vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+ mutex_unlock(&vop2->ovl_lock);
}
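For orientation, the commit order implemented above can be restated as a sketch:

	/*
	 * 1. Write OVL_CTRL, selecting this VP as the REGDONE source only if
	 *    LAYER_SEL or PORT_SEL actually changed.
	 * 2. If the port mux changed: write OVL_PORT_SEL, latch it with
	 *    vop2_cfg_done() and poll until the hardware has taken the new value.
	 * 3. If an earlier layer configuration is still in flight on another VP,
	 *    poll until the hardware catches up.
	 * 4. Only then write the new OVL_LAYER_SEL, all of it under ovl_lock.
	 */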
static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index f56e77e7f6d0..261713dd7d5a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -32,78 +32,123 @@
#define TRACE_SYSTEM gpu_scheduler
#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+/**
+ * DOC: uAPI trace events
+ *
+ * ``drm_sched_job_queue``, ``drm_sched_job_run``, ``drm_sched_job_add_dep``,
+ * ``drm_sched_job_done`` and ``drm_sched_job_unschedulable`` are considered
+ * stable uAPI.
+ *
+ * Common trace event attributes:
+ *
+ * * ``dev`` - the dev_name() of the device running the job.
+ *
+ * * ``ring`` - the hardware ring running the job. Together with ``dev`` it
+ * uniquely identifies where the job is going to be executed.
+ *
+ * * ``fence`` - the &struct dma_fence.context and the &struct dma_fence.seqno of
+ * &struct drm_sched_fence.finished
+ *
+ * All the events depend on drm_sched_job_arm() having been called already for
+ * the job because they use &struct drm_sched_job.sched or
+ * &struct drm_sched_job.s_fence.
+ */
+
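For illustration, a drm_sched_job_queue event rendered with the format defined below could look like this (all values hypothetical):

	drm_sched_job_queue: dev=0000:03:00.0, fence=142:7, ring=gfx, job count:1, hw job count:0, client_id:42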
DECLARE_EVENT_CLASS(drm_sched_job,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity),
TP_STRUCT__entry(
- __field(struct drm_sched_entity *, entity)
- __field(struct dma_fence *, fence)
__string(name, sched_job->sched->name)
- __field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
+ __string(dev, dev_name(sched_job->sched->dev))
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, client_id)
),
TP_fast_assign(
- __entry->entity = entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
__assign_str(name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->credit_count);
+ __assign_str(dev);
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->client_id = sched_job->s_fence->drm_client_id;
),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __get_str(name),
- __entry->job_count, __entry->hw_job_count)
+ TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu",
+ __get_str(dev),
+ __entry->fence_context, __entry->fence_seqno, __get_str(name),
+ __entry->job_count, __entry->hw_job_count, __entry->client_id)
);
-DEFINE_EVENT(drm_sched_job, drm_sched_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-DEFINE_EVENT(drm_sched_job, drm_run_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-TRACE_EVENT(drm_sched_process_job,
+TRACE_EVENT(drm_sched_job_done,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
),
TP_fast_assign(
- __entry->fence = &fence->finished;
+ __entry->fence_context = fence->finished.context;
+ __entry->fence_seqno = fence->finished.seqno;
),
- TP_printk("fence=%p signaled", __entry->fence)
+ TP_printk("fence=%llu:%llu signaled",
+ __entry->fence_context, __entry->fence_seqno)
);
-TRACE_EVENT(drm_sched_job_wait_dep,
+TRACE_EVENT(drm_sched_job_add_dep,
+ TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("fence=%llu:%llu depends on fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
+);
+
+TRACE_EVENT(drm_sched_job_unschedulable,
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __string(name, sched_job->sched->name)
- __field(uint64_t, id)
- __field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
- __assign_str(name);
- __entry->id = sched_job->id;
- __entry->fence = fence;
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
- __get_str(name), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
);
#endif /* _GPU_SCHED_TRACE_H_ */
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index ac678de7fe5e..8867b95ab089 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -461,10 +461,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
-
- if (drm_sched_entity_add_dependency_cb(entity))
+ if (drm_sched_entity_add_dependency_cb(entity)) {
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
return NULL;
+ }
}
/* skip jobs from entity that marked guilty */
@@ -529,10 +529,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
/*
- * Only when the queue is empty are we guaranteed that the scheduler
- * thread cannot change ->last_scheduled. To enforce ordering we need
- * a read barrier here. See drm_sched_entity_pop_job() for the other
- * side.
+ * Only when the queue is empty are we guaranteed that
+ * drm_sched_run_job_work() cannot change entity->last_scheduled. To
+ * enforce ordering we need a read barrier here. See
+ * drm_sched_entity_pop_job() for the other side.
*/
smp_rmb();
@@ -570,7 +570,15 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
bool first;
ktime_t submit_ts;
- trace_drm_sched_job(sched_job, entity);
+ trace_drm_sched_job_queue(sched_job, entity);
+
+ if (trace_drm_sched_job_add_dep_enabled()) {
+ struct dma_fence *entry;
+ unsigned long index;
+
+ xa_for_each(&sched_job->dependencies, index, entry)
+ trace_drm_sched_job_add_dep(sched_job, entry);
+ }
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index e971528504a5..9391d6f0dc01 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -206,7 +206,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
EXPORT_SYMBOL(to_drm_sched_fence);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
- void *owner)
+ void *owner,
+ u64 drm_client_id)
{
struct drm_sched_fence *fence = NULL;
@@ -215,6 +216,7 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
+ fence->drm_client_id = drm_client_id;
spin_lock_init(&fence->lock);
return fence;
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 599cf6e1bb74..7ea5a6736f98 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -24,7 +24,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
- void *owner);
+ void *owner, u64 drm_client_id);
void drm_sched_fence_init(struct drm_sched_fence *fence,
struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 829579c41c6b..e2cda28a1af4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -66,6 +66,7 @@
* This implies waiting for previously executed jobs.
*/
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -83,12 +84,6 @@
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map drm_sched_lockdep_map = {
- .name = "drm_sched_lockdep_map"
-};
-#endif
-
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
@@ -268,38 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current
- * entity in terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
}
}
list_for_each_entry(entity, &rq->entities, list) {
- if (drm_sched_entity_is_ready(entity)) {
- /* If we can't queue yet, preserve the current entity in
- * terms of fairness.
- */
- if (!drm_sched_can_queue(sched, entity)) {
- spin_unlock(&rq->lock);
- return ERR_PTR(-ENOSPC);
- }
-
- rq->current_entity = entity;
- reinit_completion(&entity->entity_idle);
- spin_unlock(&rq->lock);
- return entity;
- }
+ if (drm_sched_entity_is_ready(entity))
+ goto found;
if (entity == rq->current_entity)
break;
@@ -308,6 +279,22 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
spin_unlock(&rq->lock);
return NULL;
+
+found:
+ if (!drm_sched_can_queue(sched, entity)) {
+ /*
+ * If the scheduler cannot take more jobs, signal the caller not to
+ * consider lower priority queues.
+ */
+ entity = ERR_PTR(-ENOSPC);
+ } else {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ }
+
+ spin_unlock(&rq->lock);
+
+ return entity;
}
/**
@@ -379,11 +366,16 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished))
__drm_sched_run_free_queue(sched);
+}
+
+static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ drm_sched_run_free_queue(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -391,7 +383,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
*/
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
@@ -401,7 +393,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
- trace_drm_sched_process_job(s_fence);
+ trace_drm_sched_job_done(s_fence);
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
@@ -536,11 +528,37 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
spin_unlock(&sched->job_list_lock);
}
+/**
+ * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
+ * @sched: scheduler instance
+ * @job: job to be reinserted on the pending list
+ *
+ * In the case of a "false timeout" - when a timeout occurs but the GPU isn't
+ * hung and is making progress - the scheduler must reinsert the job back into
+ * @sched->pending_list. Otherwise, the job and its resources won't be freed
+ * through the &struct drm_sched_backend_ops.free_job callback.
+ *
+ * This function must be used in "false timeout" cases only.
+ */
+static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job)
+{
+ spin_lock(&sched->job_list_lock);
+ list_add(&job->list, &sched->pending_list);
+
+ /* After reinserting the job, the scheduler enqueues the free-job work
+ * again if ready. Otherwise, a signaled job could be added to the
+ * pending list, but never freed.
+ */
+ drm_sched_run_free_queue(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
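A driver opts into this path by returning DRM_GPU_SCHED_STAT_NO_HANG from its timeout handler; a minimal sketch, where the progress check and reset helpers are hypothetical driver code:

	static enum drm_gpu_sched_stat
	my_timedout_job(struct drm_sched_job *sched_job)
	{
		/* Hypothetical driver check: the ring advanced, false timeout. */
		if (my_ring_made_progress(sched_job->sched))
			return DRM_GPU_SCHED_STAT_NO_HANG;

		my_reset_hw(sched_job->sched);	/* hypothetical reset helper */
		return DRM_GPU_SCHED_STAT_RESET;
	}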
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
@@ -551,9 +569,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job) {
/*
- * Remove the bad job so it cannot be freed by concurrent
- * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
- * is parked at which point it's safe.
+ * Remove the bad job so it cannot be freed by a concurrent
+ * &struct drm_sched_backend_ops.free_job. It will be
+ * reinserted after the scheduler's work items have been
+ * cancelled, at which point it's safe.
*/
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
@@ -568,6 +587,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+
+ if (status == DRM_GPU_SCHED_STAT_NO_HANG)
+ drm_sched_job_reinsert_on_false_timeout(sched, job);
} else {
spin_unlock(&sched->job_list_lock);
}
@@ -590,6 +612,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
* This function is typically used for reset recovery (see the docu of
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler teardown, i.e., before calling drm_sched_fini().
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
@@ -599,10 +625,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_finished_job cannot race against us and release the
+ * drm_sched_get_finished_job() cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_finished_job will not be called
- * now until the scheduler thread is unparked.
+ * (earlier) cleanups and drm_sched_get_finished_job() will not be
+ * called now until the scheduler's work items are submitted again.
*/
if (bad && bad->sched == sched)
/*
@@ -615,7 +641,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Iterate the job list from later to earlier one and either deactive
* their HW callbacks or remove them from pending list if they already
* signaled.
- * This iteration is thread safe as sched thread is stopped.
+ * This iteration is thread safe as the scheduler's work items have been
+ * cancelled.
*/
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
list) {
@@ -674,15 +701,19 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_backend_ops.timedout_job() for details). Do not call it for
* scheduler startup. The scheduler itself is fully operational after
* drm_sched_init() succeeded.
+ *
+ * As it's only used for reset recovery, drivers must not call this function
+ * in their &struct drm_sched_backend_ops.timedout_job callback when they
+ * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
*/
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
struct drm_sched_job *s_job, *tmp;
/*
- * Locking the list is not required here as the sched thread is parked
- * so no new jobs are being inserted or removed. Also concurrent
- * GPU recovers can't run in parallel.
+ * Locking the list is not required here as the scheduler's work items
+ * are currently not running, so no new jobs are being inserted or
+ * removed. Also concurrent GPU recovers can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
@@ -764,6 +795,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* @credits: the number of credits this job contributes to the schedulers
* credit limit
* @owner: job owner for debugging
+ * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
+ * events)
*
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
@@ -784,7 +817,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner)
+ u32 credits, void *owner,
+ uint64_t drm_client_id)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -810,7 +844,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
job->entity = entity;
job->credits = credits;
- job->s_fence = drm_sched_fence_alloc(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
if (!job->s_fence)
return -ENOMEM;
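On the driver side the new parameter is typically wired up from &struct drm_file.client_id of the submitter; a hypothetical submit-path sketch:

	/* "file_priv" is the struct drm_file of the submitting client. */
	ret = drm_sched_job_init(&job->base, &entity->base, 1 /* credits */,
				 job /* owner */, file_priv->client_id);
	if (ret)
		return ret;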
@@ -850,7 +884,6 @@ void drm_sched_job_arm(struct drm_sched_job *job)
job->sched = sched;
job->s_priority = entity->priority;
- job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
@@ -1193,7 +1226,7 @@ static void drm_sched_free_job_work(struct work_struct *w)
if (job)
sched->ops->free_job(job);
- drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1229,7 +1262,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
- trace_drm_run_job(sched_job, entity);
+ trace_drm_sched_job_run(sched_job, entity);
/*
* The run_job() callback must by definition return a fence whose
* refcount has been incremented for the scheduler already.
@@ -1256,6 +1289,25 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_run_job_queue(sched);
}
+static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
+{
+#if (IS_ENABLED(CONFIG_LOCKDEP))
+ static struct lockdep_map map = {
+ .name = "drm_sched_lockdep_map"
+ };
+
+ /*
+ * Avoid leaking a lockdep map on each drm sched creation and
+ * destruction by using a single lockdep map for all drm sched
+ * allocated submit_wq.
+ */
+
+ return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
+#else
+ return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+#endif
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
@@ -1296,13 +1348,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
-#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
- WQ_MEM_RECLAIM,
- &drm_sched_lockdep_map);
-#else
- sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
-#endif
+ sched->submit_wq = drm_sched_alloc_wq(args->name);
if (!sched->submit_wq)
return -ENOMEM;
@@ -1348,6 +1394,18 @@ Out_check_own:
}
EXPORT_SYMBOL(drm_sched_init);
+static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job, *tmp;
+
+ /* All other accessors are stopped. No locking necessary. */
+ list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
+ sched->ops->cancel_job(job);
+ list_del(&job->list);
+ sched->ops->free_job(job);
+ }
+}
+
/**
* drm_sched_fini - Destroy a gpu scheduler
*
@@ -1355,19 +1413,11 @@ EXPORT_SYMBOL(drm_sched_init);
*
* Tears down and cleans up the scheduler.
*
- * This stops submission of new jobs to the hardware through
- * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
- * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
- * There is no solution for this currently. Thus, it is up to the driver to make
- * sure that:
- *
- * a) drm_sched_fini() is only called after for all submitted jobs
- * drm_sched_backend_ops.free_job() has been called or that
- * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
- * after drm_sched_fini() ran are freed manually.
- *
- * FIXME: Take care of the above problem and prevent this function from leaking
- * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
+ * This stops submission of new jobs to the hardware through &struct
+ * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
+ * is implemented, all jobs will be canceled through it and afterwards cleaned
+ * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
+ * implemented, memory could leak.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
@@ -1397,11 +1447,18 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
+ /* Avoid memory leaks if supported by the driver. */
+ if (sched->ops->cancel_job)
+ drm_sched_cancel_remaining_jobs(sched);
+
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
sched->ready = false;
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+
+ if (!list_empty(&sched->pending_list))
+ dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
}
EXPORT_SYMBOL(drm_sched_fini);
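Drivers opt into the leak-free teardown by populating the new callback in their ops table, as the mock scheduler does further below; a hypothetical example:

	static const struct drm_sched_backend_ops my_sched_ops = {
		.run_job	= my_run_job,
		.timedout_job	= my_timedout_job,
		.free_job	= my_free_job,
		/* Lets drm_sched_fini() cancel and free still-pending jobs. */
		.cancel_job	= my_cancel_job,
	};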
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index f999c8859cf7..65acffc3fea8 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -63,8 +63,8 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
lockdep_assert_held(&sched->lock);
job->flags |= DRM_MOCK_SCHED_JOB_DONE;
- list_move_tail(&job->link, &sched->done_list);
- dma_fence_signal(&job->hw_fence);
+ list_del(&job->link);
+ dma_fence_signal_locked(&job->hw_fence);
complete(&job->done);
}
@@ -117,13 +117,13 @@ drm_mock_sched_job_new(struct kunit *test,
ret = drm_sched_job_init(&job->base,
&entity->base,
1,
- NULL);
+ NULL,
+ 1);
KUNIT_ASSERT_EQ(test, ret, 0);
job->test = test;
init_completion(&job->done);
- spin_lock_init(&job->lock);
INIT_LIST_HEAD(&job->link);
hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -169,7 +169,7 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
dma_fence_init(&job->hw_fence,
&drm_mock_sched_hw_fence_ops,
- &job->lock,
+ &sched->lock,
sched->hw_timeline.context,
atomic_inc_return(&sched->hw_timeline.next_seqno));
@@ -200,38 +200,82 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
return &job->hw_fence;
}
+/*
+ * Normally, drivers would take appropriate measures in this callback, such as
+ * killing the entity the faulty job is associated with, resetting the hardware
+ * and/or resubmitting non-faulty jobs.
+ *
+ * For the mock scheduler, there are no hardware rings to be reset nor jobs
+ * to be resubmitted. Thus, this function merely ensures that
+ * a) timed-out fences get signaled properly and removed from the pending list,
+ * b) the mock scheduler framework gets informed about the timeout via a flag, and
+ * c) the drm_sched_job, no longer needed, gets freed.
+ */
static enum drm_gpu_sched_stat
mock_sched_timedout_job(struct drm_sched_job *sched_job)
{
+ struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
+ job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
+ }
- job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+ spin_lock_irqsave(&sched->lock, flags);
+ if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+ list_del(&job->link);
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+ dma_fence_set_error(&job->hw_fence, -ETIMEDOUT);
+ dma_fence_signal_locked(&job->hw_fence);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ dma_fence_put(&job->hw_fence);
+ drm_sched_job_cleanup(sched_job);
+ /* Mock job itself is freed by the kunit framework. */
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static void mock_sched_free_job(struct drm_sched_job *sched_job)
{
- struct drm_mock_scheduler *sched =
- drm_sched_to_mock_sched(sched_job->sched);
struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
- unsigned long flags;
- /* Remove from the scheduler done list. */
- spin_lock_irqsave(&sched->lock, flags);
- list_del(&job->link);
- spin_unlock_irqrestore(&sched->lock, flags);
dma_fence_put(&job->hw_fence);
-
drm_sched_job_cleanup(sched_job);
/* Mock job itself is freed by the kunit framework. */
}
+static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+ list_del(&job->link);
+ dma_fence_set_error(&job->hw_fence, -ECANCELED);
+ dma_fence_signal_locked(&job->hw_fence);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * The GPU scheduler will still call drm_sched_backend_ops.free_job().
+ * Mock job itself is freed by the kunit framework.
+ */
+}
+
static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
.run_job = mock_sched_run_job,
.timedout_job = mock_sched_timedout_job,
- .free_job = mock_sched_free_job
+ .free_job = mock_sched_free_job,
+ .cancel_job = mock_sched_cancel_job,
};
/**
@@ -265,7 +309,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
sched->hw_timeline.context = dma_fence_context_alloc(1);
atomic_set(&sched->hw_timeline.next_seqno, 0);
INIT_LIST_HEAD(&sched->job_list);
- INIT_LIST_HEAD(&sched->done_list);
spin_lock_init(&sched->lock);
return sched;
@@ -280,38 +323,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
*/
void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
{
- struct drm_mock_sched_job *job, *next;
- unsigned long flags;
- LIST_HEAD(list);
-
- drm_sched_wqueue_stop(&sched->base);
-
- /* Force complete all unfinished jobs. */
- spin_lock_irqsave(&sched->lock, flags);
- list_for_each_entry_safe(job, next, &sched->job_list, link)
- list_move_tail(&job->link, &list);
- spin_unlock_irqrestore(&sched->lock, flags);
-
- list_for_each_entry(job, &list, link)
- hrtimer_cancel(&job->timer);
-
- spin_lock_irqsave(&sched->lock, flags);
- list_for_each_entry_safe(job, next, &list, link)
- drm_mock_sched_job_complete(job);
- spin_unlock_irqrestore(&sched->lock, flags);
-
- /*
- * Free completed jobs and jobs not yet processed by the DRM scheduler
- * free worker.
- */
- spin_lock_irqsave(&sched->lock, flags);
- list_for_each_entry_safe(job, next, &sched->done_list, link)
- list_move_tail(&job->link, &list);
- spin_unlock_irqrestore(&sched->lock, flags);
-
- list_for_each_entry_safe(job, next, &list, link)
- mock_sched_free_job(&job->base);
-
drm_sched_fini(&sched->base);
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 27caf8285fb7..63d4f2ac7074 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -49,7 +49,6 @@ struct drm_mock_scheduler {
spinlock_t lock;
struct list_head job_list;
- struct list_head done_list;
struct {
u64 context;
@@ -98,6 +97,7 @@ struct drm_mock_sched_job {
#define DRM_MOCK_SCHED_JOB_DONE 0x1
#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
unsigned long flags;
struct list_head link;
@@ -106,7 +106,6 @@ struct drm_mock_sched_job {
unsigned int duration_us;
ktime_t finish_at;
- spinlock_t lock;
struct dma_fence hw_fence;
struct kunit *test;
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 7230057e0594..55eb142bd7c5 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -5,6 +5,8 @@
#include "sched_tests.h"
+#define MOCK_TIMEOUT (HZ / 5)
+
/*
* DRM scheduler basic tests should check the basic functional correctness of
* the scheduler, including some very light smoke testing. More targeted tests,
@@ -28,7 +30,7 @@ static void drm_sched_basic_exit(struct kunit *test)
static int drm_sched_timeout_init(struct kunit *test)
{
- test->priv = drm_mock_sched_new(test, HZ);
+ test->priv = drm_mock_sched_new(test, MOCK_TIMEOUT);
return 0;
}
@@ -204,6 +206,47 @@ static struct kunit_suite drm_sched_basic = {
.test_cases = drm_sched_basic_tests,
};
+static void drm_sched_basic_cancel(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Check that drm_sched_fini() uses the cancel_job() callback to cancel
+ * jobs that are still pending.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+
+ KUNIT_ASSERT_EQ(test, job->hw_fence.error, -ECANCELED);
+}
+
+static struct kunit_case drm_sched_cancel_tests[] = {
+ KUNIT_CASE(drm_sched_basic_cancel),
+ {}
+};
+
+static struct kunit_suite drm_sched_cancel = {
+ .name = "drm_sched_basic_cancel_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_cancel_tests,
+};
+
static void drm_sched_basic_timeout(struct kunit *test)
{
struct drm_mock_scheduler *sched = test->priv;
@@ -227,14 +270,14 @@ static void drm_sched_basic_timeout(struct kunit *test)
done = drm_mock_sched_job_wait_scheduled(job, HZ);
KUNIT_ASSERT_TRUE(test, done);
- done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT / 2);
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
0);
- done = drm_mock_sched_job_wait_finished(job, HZ);
+ done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT);
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
@@ -244,8 +287,51 @@ static void drm_sched_basic_timeout(struct kunit *test)
drm_mock_sched_entity_free(entity);
}
+static void drm_sched_skip_reset(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that if the job is still running, the timeout handler
+ * will skip the reset and allow the job to complete.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ job->flags = DRM_MOCK_SCHED_JOB_DONT_RESET;
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, 2 * MOCK_TIMEOUT);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
+ 0);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
static struct kunit_case drm_sched_timeout_tests[] = {
KUNIT_CASE(drm_sched_basic_timeout),
+ KUNIT_CASE(drm_sched_skip_reset),
{}
};
@@ -471,6 +557,7 @@ static struct kunit_suite drm_sched_credits = {
kunit_test_suites(&drm_sched_basic,
&drm_sched_timeout,
+ &drm_sched_cancel,
&drm_sched_priority,
&drm_sched_modify_sched,
&drm_sched_credits);
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
index 741d1bb4b83f..6de7d92d9b74 100644
--- a/drivers/gpu/drm/sitronix/Kconfig
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -11,10 +11,6 @@ config DRM_ST7571_I2C
if M is selected the module will be called st7571-i2c.
-config TINYDRM_ST7586
- tristate
- default n
-
config DRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
@@ -22,17 +18,12 @@ config DRM_ST7586
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
- default TINYDRM_ST7586
help
DRM driver for the following Sitronix ST7586 panels:
* LEGO MINDSTORMS EV3
If M is selected the module will be called st7586.
-config TINYDRM_ST7735R
- tristate
- default n
-
config DRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
@@ -41,7 +32,6 @@ config DRM_ST7735R
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
- default TINYDRM_ST7735R
help
DRM driver for Sitronix ST7715R/ST7735R with one of the following
LCDs:
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
index eec846892962..453eb7e045e5 100644
--- a/drivers/gpu/drm/sitronix/st7571-i2c.c
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -68,6 +68,9 @@
#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c)))
#define ST7571_COMMAND_SET_NORMAL (0x00)
+/* ST7567 commands */
+#define ST7567_SET_LCD_BIAS(m) (0xa2 | FIELD_PREP(GENMASK(0, 0), (m)))
+
#define ST7571_PAGE_HEIGHT 8
#define DRIVER_NAME "st7571"
@@ -92,6 +95,7 @@ struct st7571_panel_constraints {
struct st7571_panel_data {
int (*init)(struct st7571_device *st7571);
+ int (*parse_dt)(struct st7571_device *st7571);
struct st7571_panel_constraints constraints;
};
@@ -550,8 +554,8 @@ static const struct drm_crtc_funcs st7571_crtc_funcs = {
* Encoder
*/
-static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void st7571_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct st7571_device *st7571 = drm_to_st7571(drm);
@@ -565,8 +569,8 @@ static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder,
st7571_send_command_list(st7571, &command, 1);
}
-static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void st7571_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct st7571_device *st7571 = drm_to_st7571(drm);
@@ -581,8 +585,8 @@ static const struct drm_encoder_funcs st7571_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = {
- .atomic_enable = ssd130x_encoder_atomic_enable,
- .atomic_disable = ssd130x_encoder_atomic_disable,
+ .atomic_enable = st7571_encoder_atomic_enable,
+ .atomic_disable = st7571_encoder_atomic_disable,
};
/*
@@ -773,6 +777,32 @@ static int st7571_validate_parameters(struct st7571_device *st7571)
return 0;
}
+static int st7567_parse_dt(struct st7571_device *st7567)
+{
+ struct device *dev = &st7567->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7567->width_mm);
+ of_property_read_u32(np, "height-mm", &st7567->height_mm);
+
+ st7567->pformat = &st7571_monochrome;
+ st7567->bpp = 1;
+
+ st7567->startline = dt.vfront_porch.typ;
+ st7567->nlines = dt.vactive.typ;
+ st7567->ncols = dt.hactive.typ;
+
+ return 0;
+}
+
static int st7571_parse_dt(struct st7571_device *st7571)
{
struct device *dev = &st7571->client->dev;
@@ -804,7 +834,9 @@ static int st7571_parse_dt(struct st7571_device *st7571)
st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(st7571->reset))
- return PTR_ERR(st7571->reset);
+ return dev_err_probe(dev, PTR_ERR(st7571->reset),
+ "Failed to get reset gpio\n");
+
return 0;
}
@@ -816,6 +848,38 @@ static void st7571_reset(struct st7571_device *st7571)
gpiod_set_value_cansleep(st7571->reset, 0);
}
+static int st7567_lcd_init(struct st7571_device *st7567)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+ * reference initialization code in the ST7567 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7567_SET_LCD_BIAS(1),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_REGULATOR_REG(4),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x20),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7567->startline),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ return st7571_send_command_list(st7567, commands, ARRAY_SIZE(commands));
+}
+
static int st7571_lcd_init(struct st7571_device *st7571)
{
/*
@@ -879,7 +943,7 @@ static int st7571_probe(struct i2c_client *client)
i2c_set_clientdata(client, st7571);
st7571->pdata = device_get_match_data(&client->dev);
- ret = st7571_parse_dt(st7571);
+ ret = st7571->pdata->parse_dt(st7571);
if (ret)
return ret;
@@ -960,8 +1024,21 @@ static void st7571_remove(struct i2c_client *client)
drm_dev_unplug(&st7571->dev);
}
+struct st7571_panel_data st7567_config = {
+ .init = st7567_lcd_init,
+ .parse_dt = st7567_parse_dt,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 64,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = false,
+ },
+};
+
struct st7571_panel_data st7571_config = {
.init = st7571_lcd_init,
+ .parse_dt = st7571_parse_dt,
.constraints = {
.min_nlines = 1,
.max_nlines = 128,
@@ -972,12 +1049,14 @@ struct st7571_panel_data st7571_config = {
};
static const struct of_device_id st7571_of_match[] = {
+ { .compatible = "sitronix,st7567", .data = &st7567_config },
{ .compatible = "sitronix,st7571", .data = &st7571_config },
{},
};
MODULE_DEVICE_TABLE(of, st7571_of_match);
static const struct i2c_device_id st7571_id[] = {
+ { "st7567", 0 },
{ "st7571", 0 },
{ }
};
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 74a1eef4674e..7484d3c3f4ed 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -97,7 +97,7 @@ struct sti_dvo {
struct dvo_config *config;
bool enabled;
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
};
struct sti_dvo_connector {
@@ -439,7 +439,6 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_dvo_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -455,20 +454,14 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
connector->dvo = dvo;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = dvo;
- bridge->funcs = &sti_dvo_bridge_funcs;
- bridge->of_node = dvo->dev.of_node;
- drm_bridge_add(bridge);
+ dvo->bridge.driver_private = dvo;
+ dvo->bridge.of_node = dvo->dev.of_node;
+ drm_bridge_add(&dvo->bridge);
- err = drm_bridge_attach(encoder, bridge, NULL, 0);
+ err = drm_bridge_attach(encoder, &dvo->bridge, NULL, 0);
if (err)
return err;
- dvo->bridge = bridge;
connector->encoder = encoder;
dvo->encoder = encoder;
@@ -490,7 +483,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&dvo->bridge);
return -EINVAL;
}
@@ -499,7 +492,7 @@ static void sti_dvo_unbind(struct device *dev,
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
- drm_bridge_remove(dvo->bridge);
+ drm_bridge_remove(&dvo->bridge);
}
static const struct component_ops sti_dvo_ops = {
@@ -515,10 +508,10 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
- if (!dvo) {
- DRM_ERROR("Failed to allocate memory for DVO\n");
- return -ENOMEM;
+ dvo = devm_drm_bridge_alloc(dev, struct sti_dvo, bridge, &sti_dvo_bridge_funcs);
+ if (IS_ERR(dvo)) {
+ DRM_ERROR("Failed to allocate DVO\n");
+ return PTR_ERR(dvo);
}
dvo->dev = pdev->dev;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index d202b6c1eb8f..2c015f563de9 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -246,6 +246,7 @@ struct sti_hda {
struct device dev;
struct drm_device *drm_dev;
struct drm_display_mode mode;
+ struct drm_bridge bridge;
void __iomem *regs;
void __iomem *video_dacs_ctrl;
struct clk *clk_pix;
@@ -262,6 +263,11 @@ struct sti_hda_connector {
#define to_sti_hda_connector(x) \
container_of(x, struct sti_hda_connector, drm_connector)
+static struct sti_hda *drm_bridge_to_sti_hda(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sti_hda, bridge);
+}
+
static u32 hda_read(struct sti_hda *hda, int offset)
{
return readl(hda->regs + offset);
@@ -401,7 +407,7 @@ static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
static void sti_hda_disable(struct drm_bridge *bridge)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 val;
if (!hda->enabled)
@@ -426,7 +432,7 @@ static void sti_hda_disable(struct drm_bridge *bridge)
static void sti_hda_pre_enable(struct drm_bridge *bridge)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 val, i, mode_idx;
u32 src_filter_y, src_filter_c;
u32 *coef_y, *coef_c;
@@ -517,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct sti_hda *hda = bridge->driver_private;
+ struct sti_hda *hda = drm_bridge_to_sti_hda(bridge);
u32 mode_idx;
int hddac_rate;
int ret;
@@ -677,7 +683,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_hda_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -693,13 +698,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = hda;
- bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
+ drm_bridge_attach(encoder, &hda->bridge, NULL, 0);
connector->encoder = encoder;
@@ -745,9 +744,9 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
- if (!hda)
- return -ENOMEM;
+ hda = devm_drm_bridge_alloc(dev, struct sti_hda, bridge, &sti_hda_bridge_funcs);
+ if (IS_ERR(hda))
+ return PTR_ERR(hda);
hda->dev = pdev->dev;
hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 37b8d619066e..4e7c3d78b2b9 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -168,6 +168,11 @@ struct sti_hdmi_connector {
#define to_sti_hdmi_connector(x) \
container_of(x, struct sti_hdmi_connector, drm_connector)
+static struct sti_hdmi *drm_bridge_to_sti_hdmi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sti_hdmi, bridge);
+}
+
static const struct drm_prop_enum_list colorspace_mode_names[] = {
{ HDMI_COLORSPACE_RGB, "rgb" },
{ HDMI_COLORSPACE_YUV422, "yuv422" },
@@ -749,7 +754,7 @@ static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
static void sti_hdmi_disable(struct drm_bridge *bridge)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
u32 val = hdmi_read(hdmi, HDMI_CFG);
@@ -881,7 +886,7 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi)
static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
DRM_DEBUG_DRIVER("\n");
@@ -936,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct sti_hdmi *hdmi = bridge->driver_private;
+ struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge);
int ret;
DRM_DEBUG_DRIVER("\n");
@@ -1273,7 +1278,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -1289,13 +1293,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
connector->hdmi = hdmi;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -EINVAL;
-
- bridge->driver_private = hdmi;
- bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
+ drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
connector->encoder = encoder;
@@ -1385,9 +1383,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct sti_hdmi, bridge, &sti_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
if (ddc) {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 6d4c3f57bc46..91d43dd46f13 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -12,6 +12,7 @@
#include <media/cec-notifier.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_property.h>
@@ -86,6 +87,7 @@ struct sti_hdmi {
struct hdmi_audio_params audio;
struct drm_connector *drm_connector;
struct cec_notifier *notifier;
+ struct drm_bridge bridge;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index a3ae9a93ce66..07788e8d3d83 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1049,9 +1049,9 @@ static int lvds_probe(struct platform_device *pdev)
dev_dbg(dev, "Probing LVDS driver...\n");
- lvds = devm_kzalloc(dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(dev, struct stm_lvds, lvds_bridge, &lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -1164,7 +1164,6 @@ static int lvds_probe(struct platform_device *pdev)
goto err_lvds_probe;
}
- lvds->lvds_bridge.funcs = &lvds_bridge_funcs;
lvds->lvds_bridge.of_node = dev->of_node;
lvds->hw_version = lvds_read(lvds, LVDS_VERR);
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 58480d8e4f70..c100d29b1a89 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -212,7 +212,7 @@ void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
mode, encoding, range);
return;
@@ -228,7 +228,7 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
return;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b41d33baa30..31a8409b98f4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -274,6 +274,7 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
u32 bld_base = sun8i_blender_base(mixer);
+ struct regmap *bld_regs = sun8i_blender_regmap(mixer);
struct drm_plane_state *plane_state;
struct drm_plane *plane;
u32 route = 0, pipe_en = 0;
@@ -313,12 +314,13 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
pipe_en |= SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
}
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
pipe_en | SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
- SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
+ if (mixer->cfg->de_type != SUN8I_MIXER_DE33)
+ regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
+ SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
@@ -367,25 +369,31 @@ static void sun8i_mixer_mode_set(struct sunxi_engine *engine,
const struct drm_display_mode *mode)
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
+ struct regmap *bld_regs;
u32 bld_base, size, val;
bool interlaced;
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
size = SUN8I_MIXER_SIZE(mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
mode->hdisplay, mode->vdisplay);
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_SIZE, size);
- regmap_write(engine->regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(mixer->top_regs, SUN50I_MIXER_GLOBAL_SIZE, size);
+ else
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_SIZE, size);
+
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
if (interlaced)
val = SUN8I_MIXER_BLEND_OUTCTL_INTERLACED;
else
val = 0;
- regmap_update_bits(engine->regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
+ regmap_update_bits(bld_regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED, val);
DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
@@ -399,12 +407,29 @@ static const struct sunxi_engine_ops sun8i_engine_ops = {
};
static const struct regmap_config sun8i_mixer_regmap_config = {
+ .name = "layers",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0xffffc, /* guessed */
};
+static const struct regmap_config sun8i_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x3c,
+};
+
+static const struct regmap_config sun8i_disp_regmap_config = {
+ .name = "display",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x20000,
+};
+
static int sun8i_mixer_of_get_id(struct device_node *node)
{
struct device_node *ep, *remote;
@@ -425,6 +450,50 @@ static int sun8i_mixer_of_get_id(struct device_node *node)
return of_ep.id;
}
+static void sun8i_mixer_init(struct sun8i_mixer *mixer)
+{
+ struct regmap *top_regs, *disp_regs;
+ unsigned int base = sun8i_blender_base(mixer);
+ int plane_cnt, i;
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ top_regs = mixer->top_regs;
+ disp_regs = mixer->disp_regs;
+ } else {
+ top_regs = mixer->engine.regs;
+ disp_regs = mixer->engine.regs;
+ }
+
+ /* Enable the mixer */
+ regmap_write(top_regs, SUN8I_MIXER_GLOBAL_CTL,
+ SUN8I_MIXER_GLOBAL_CTL_RT_EN);
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(top_regs, SUN50I_MIXER_GLOBAL_CLK, 1);
+
+ /* Set background color to black */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ /*
+ * Set fill color of bottom plane to black. Generally not needed
+ * except when VI plane is at bottom (zpos = 0) and enabled.
+ */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
+ for (i = 0; i < plane_cnt; i++)
+ regmap_write(disp_regs,
+ SUN8I_MIXER_BLEND_MODE(base, i),
+ SUN8I_MIXER_BLEND_MODE_DEF);
+
+ regmap_update_bits(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+}
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -433,8 +502,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
void __iomem *regs;
- unsigned int base;
- int plane_cnt;
int i, ret;
/*
@@ -495,6 +562,30 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
return PTR_ERR(mixer->engine.regs);
}
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "top");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->top_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_top_regmap_config);
+ if (IS_ERR(mixer->top_regs)) {
+ dev_err(dev, "Couldn't create the top regmap\n");
+ return PTR_ERR(mixer->top_regs);
+ }
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "display");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->disp_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_disp_regmap_config);
+ if (IS_ERR(mixer->disp_regs)) {
+ dev_err(dev, "Couldn't create the disp regmap\n");
+ return PTR_ERR(mixer->disp_regs);
+ }
+ }
+
mixer->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(mixer->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
@@ -534,10 +625,8 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
list_add_tail(&mixer->engine.list, &drv->engine_list);
- base = sun8i_blender_base(mixer);
-
/* Reset registers and disable unused sub-engines */
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -551,7 +640,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
- } else {
+ } else if (mixer->cfg->de_type == SUN8I_MIXER_DE2) {
for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -564,31 +653,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
}
- /* Enable the mixer */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
- SUN8I_MIXER_GLOBAL_CTL_RT_EN);
-
- /* Set background color to black */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- /*
- * Set fill color of bottom plane to black. Generally not needed
- * except when VI plane is at bottom (zpos = 0) and enabled.
- */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
- for (i = 0; i < plane_cnt; i++)
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_MODE(base, i),
- SUN8I_MIXER_BLEND_MODE_DEF);
-
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+ sun8i_mixer_init(mixer);
return 0;
@@ -628,6 +693,7 @@ static void sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
@@ -636,6 +702,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
@@ -644,6 +711,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 432000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -653,6 +721,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -662,6 +731,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -670,6 +740,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
};
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
+ .de_type = SUN8I_MIXER_DE2,
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
@@ -680,6 +751,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
.ccsc = CCSC_D1_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -689,6 +761,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x1,
.scanline_yuv = 1024,
@@ -698,6 +771,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -707,6 +781,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -716,7 +791,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
- .is_de3 = true,
+ .de_type = SUN8I_MIXER_DE3,
.mod_rate = 600000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -724,6 +799,17 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun50i_h616_mixer0_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE33,
+ .mod_rate = 600000000,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ .ui_num = 3,
+ .vi_num = 1,
+ .map = {0, 6, 7, 8},
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -769,6 +855,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun50i-h6-de3-mixer-0",
.data = &sun50i_h6_mixer0_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-mixer-0",
+ .data = &sun50i_h616_mixer0_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index d7898c9c9cc0..a1c1cbccc654 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -21,6 +21,9 @@
#define SUN8I_MIXER_GLOBAL_DBUFF 0x8
#define SUN8I_MIXER_GLOBAL_SIZE 0xc
+#define SUN50I_MIXER_GLOBAL_SIZE 0x8
+#define SUN50I_MIXER_GLOBAL_CLK 0xc
+
#define SUN8I_MIXER_GLOBAL_CTL_RT_EN BIT(0)
#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
@@ -151,6 +154,12 @@ enum {
CCSC_D1_MIXER0_LAYOUT,
};
+enum sun8i_mixer_type {
+ SUN8I_MIXER_DE2,
+ SUN8I_MIXER_DE3,
+ SUN8I_MIXER_DE33,
+};
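
The enum values are deliberately ordered by hardware generation: several sites in this patch test de_type >= SUN8I_MIXER_DE3 so that DE3-era behaviour also covers DE33. A hypothetical helper (not part of the patch) spelling out that contract:

/* Assumes the enumerators stay sorted oldest to newest. */
static inline bool sun8i_mixer_is_de3_or_newer(const struct sun8i_mixer_cfg *cfg)
{
	return cfg->de_type >= SUN8I_MIXER_DE3;
}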
+
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -162,8 +171,9 @@ enum {
* @ccsc: select set of CCSC base addresses from the enumeration above.
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
- * @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @de_type: sun8i_mixer_type enum representing the display engine generation.
 * @scanline_yuv: size of a scanline for VI scaler for YUV formats.
+ * @map: channel map for DE variants processing YUV separately (DE33)
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -171,8 +181,9 @@ struct sun8i_mixer_cfg {
int scaler_mask;
int ccsc;
unsigned long mod_rate;
- unsigned int is_de3 : 1;
+ unsigned int de_type;
unsigned int scanline_yuv;
+ unsigned int map[6];
};
struct sun8i_mixer {
@@ -184,6 +195,9 @@ struct sun8i_mixer {
struct clk *bus_clk;
struct clk *mod_clk;
+
+ struct regmap *top_regs;
+ struct regmap *disp_regs;
};
enum {
@@ -214,13 +228,22 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
static inline u32
sun8i_blender_base(struct sun8i_mixer *mixer)
{
- return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+ return mixer->cfg->de_type == SUN8I_MIXER_DE3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline struct regmap *
+sun8i_blender_regmap(struct sun8i_mixer *mixer)
+{
+ return mixer->cfg->de_type == SUN8I_MIXER_DE33 ?
+ mixer->disp_regs : mixer->engine.regs;
}
static inline u32
sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return mixer->cfg->map[channel] * 0x20000 + DE2_CH_SIZE;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_CH_BASE + channel * DE3_CH_SIZE;
else
return DE2_CH_BASE + channel * DE2_CH_SIZE;
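
A worked example of the DE33 branch above, assuming the H616 configuration added by this patch (map = {0, 6, 7, 8}) and that DE2_CH_SIZE keeps its DE2 value:

/*
 * sun8i_channel_base(mixer, 1) on the H616:
 *   cfg->map[1] = 6  ->  6 * 0x20000 + DE2_CH_SIZE
 * Each channel lives in its own 0x20000-byte window selected through
 * the map[] table, rather than at a fixed stride from a common base
 * as on DE2/DE3.
 */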
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index b90e5edef4e8..f97be0040aab 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -23,6 +23,7 @@
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
@@ -51,6 +52,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -59,6 +61,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -91,22 +94,34 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
hscale = state->src_w / state->crtc_w;
vscale = state->src_h / state->crtc_h;
- sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, dst_w,
- dst_h, hscale, vscale, hphase, vphase);
- sun8i_ui_scaler_enable(mixer, channel, true);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ sun8i_vi_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase,
+ state->fb->format);
+ sun8i_vi_scaler_enable(mixer, channel, true);
+ } else {
+ sun8i_ui_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase);
+ sun8i_ui_scaler_enable(mixer, channel, true);
+ }
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
- sun8i_ui_scaler_enable(mixer, channel, false);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ sun8i_vi_scaler_enable(mixer, channel, false);
+ else
+ sun8i_ui_scaler_enable(mixer, channel, false);
}
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index ae0806bccac7..8b7a58e27517 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -93,7 +93,7 @@ static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
{
int vi_num = mixer->cfg->vi_num;
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * vi_num +
DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 9c09d9c08496..a09ee4097537 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -25,7 +25,7 @@ static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
ch_base = sun8i_channel_base(mixer, channel);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
@@ -55,6 +55,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -66,6 +67,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -183,10 +185,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
@@ -483,7 +485,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
if (!layer)
return ERR_PTR(-ENOMEM);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
@@ -507,7 +509,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
- if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) {
+ if (mixer->cfg->vi_num == 1 || mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
@@ -524,7 +526,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3)
supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index 7ba75011adf9..82df6244af88 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -835,7 +835,9 @@ static const u32 bicubic4coefftab32[480] = {
static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return sun8i_channel_base(mixer, channel) + 0x3000;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * channel;
else
@@ -956,7 +958,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
u32 val;
if (format->hsub == 1 && format->vsub == 1)
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index cb08a88242cc..1424b63dde99 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -93,6 +93,10 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de
* Plane
*/
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index ffaa2522ab96..1bcdb5ee8f09 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -47,6 +47,144 @@ EXPORT_SYMBOL(drm_sysfb_mode);
* Plane
*/
+static u32 to_nonalpha_fourcc(u32 fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
+static bool is_listed_fourcc(const u32 *fourccs, size_t nfourccs, u32 fourcc)
+{
+ const u32 *fourccs_end = fourccs + nfourccs;
+
+ while (fourccs < fourccs_end) {
+ if (*fourccs == fourcc)
+ return true;
+ ++fourccs;
+ }
+ return false;
+}
+
+/**
+ * drm_sysfb_build_fourcc_list - Filters a list of supported color formats against
+ * the device's native formats
+ * @dev: DRM device
+ * @native_fourccs: 4CC codes of natively supported color formats
+ * @native_nfourccs: The number of entries in @native_fourccs
+ * @fourccs_out: Returns 4CC codes of supported color formats
+ * @nfourccs_out: The number of available entries in @fourccs_out
+ *
+ * This function creates a list of supported color formats from natively
+ * supported formats and additional emulated formats.
+ * Most userspace programs expect at least support for
+ * XRGB8888 on the primary plane. Sysfb devices that have to emulate
+ * the format should use drm_sysfb_build_fourcc_list() to create a list
+ * of supported color formats. The returned list can be handed over to
+ * drm_universal_plane_init() et al. Native formats will go before
+ * emulated formats. Native formats with an alpha channel will be replaced
+ * by equivalent formats without an alpha channel, as primary planes usually
+ * don't support alpha. Other heuristics might be applied to optimize
+ * the sorting order. Formats near the beginning of the list are usually
+ * preferred over formats near the end of the list.
+ *
+ * Returns:
+ * The number of color-format 4CC codes returned in @fourccs_out.
+ */
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out)
+{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. If another default fallback ever emerges, it
+ * should be added here.
+ */
+ static const u32 extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
+ u32 *fourccs = fourccs_out;
+ const u32 *fourccs_end = fourccs_out + nfourccs_out;
+ size_t i;
+
+ /*
+ * The device's native formats go first.
+ */
+
+ for (i = 0; i < native_nfourccs; ++i) {
+ /*
+ * Several DTs, boot loaders and firmware interfaces report alpha
+ * formats even though the hardware scans out the non-alpha variant.
+ * So replace alpha formats with their non-alpha equivalents.
+ */
+ u32 fourcc = to_nonalpha_fourcc(native_fourccs[i]);
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ /*
+ * The extra formats, emulated by the driver, go second.
+ */
+
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate and native entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ return fourccs - fourccs_out;
+}
+EXPORT_SYMBOL(drm_sysfb_build_fourcc_list);
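
A usage sketch, mirroring the efidrm/ofdrm/simpledrm/vesadrm call sites converted below; the buffer size, native_format and example_primary_plane_funcs names are illustrative assumptions:

u32 fourccs[8];	/* assumption: enough room for 1 native + emulated formats */
size_t nfourccs;
int ret;

nfourccs = drm_sysfb_build_fourcc_list(dev, &native_format->format, 1,
				       fourccs, ARRAY_SIZE(fourccs));
ret = drm_universal_plane_init(dev, primary_plane, 0,
			       &example_primary_plane_funcs,
			       fourccs, nfourccs, NULL,
			       DRM_PLANE_TYPE_PRIMARY, NULL);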
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
index 46912924636a..1883c4a8604c 100644
--- a/drivers/gpu/drm/sysfb/efidrm.c
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -202,7 +202,7 @@ static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
&format->format, width, height, stride);
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
if (drm_edid_header_is_valid(edid_info.dummy) == 8)
sysfb->edid = edid_info.dummy;
#endif
@@ -271,8 +271,8 @@ static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- efi->formats, ARRAY_SIZE(efi->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
primary_plane = &efi->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index fddfe8bea9f7..8d8ab39c5f36 100644
--- a/drivers/gpu/drm/sysfb/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -8,13 +8,13 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -644,36 +644,36 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
writeb(b, data);
}
-static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
- const struct drm_format_info *format)
+static void ofdrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ odev->funcs->cmap_write(odev, i8, r8, g8, b8);
+}
+
+static void ofdrm_device_fill_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format)
{
struct drm_device *dev = &odev->sysfb.dev;
- int i;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = i * 8 + i / 4;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = i * 8 + i / 4;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_fill_gamma_565(crtc, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
- odev->funcs->cmap_write(odev, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -682,42 +682,21 @@ static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
}
}
-static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ofdrm_device_load_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
struct drm_device *dev = &odev->sysfb.dev;
- int i;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = lut[i * 8 + i / 4].red >> 8;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = lut[i * 8 + i / 4].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
- unsigned char r = lut[i].red >> 8;
- unsigned char g = lut[i].green >> 8;
- unsigned char b = lut[i].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_888(crtc, lut, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -753,9 +732,9 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
- ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
+ ofdrm_device_load_gamma(odev, format, crtc_state->gamma_lut->data);
else
- ofdrm_device_set_gamma_linear(odev, format);
+ ofdrm_device_fill_gamma(odev, format);
}
}
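
The conversion above routes all palette programming through the drm_crtc_fill_gamma_*()/drm_crtc_load_gamma_*() helpers, which walk the table and invoke one per-entry callback. A minimal sketch of such a callback, matching the signature ofdrm_set_gamma_lut() implements; example_cmap_write() is a hypothetical stand-in for the device's palette write:

static void example_set_gamma(struct drm_crtc *crtc, unsigned int index,
			      u16 red, u16 green, u16 blue)
{
	/* The palette registers here are assumed 8 bits per component. */
	example_cmap_write(crtc->dev, index & 0xff,
			   red >> 8, green >> 8, blue >> 8);
}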
@@ -1035,8 +1014,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- odev->formats, ARRAY_SIZE(odev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index a1c3119330de..8530a3ef8a7a 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -18,7 +18,6 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -765,8 +764,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- sdev->formats, ARRAY_SIZE(sdev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index f7532db3831f..90615e9ac86b 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -9,6 +9,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
@@ -87,15 +88,10 @@ static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
u16 red, u16 green, u16 blue)
{
- u8 i8, r8, g8, b8;
-
- if (index > 255)
- return;
-
- i8 = index;
- r8 = red >> 8;
- g8 = green >> 8;
- b8 = blue >> 8;
+ u8 i8 = index;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
outb_p(i8, VGA_PEL_IW);
outb_p(r8, VGA_PEL_D);
@@ -120,9 +116,6 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
0x00,
};
- if (index > 255)
- return;
-
__asm__ __volatile__ (
"call *(%%esi)"
: /* no return value */
@@ -135,43 +128,36 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
}
#endif
-static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
- const struct drm_format_info *format)
+static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ u8 i8 = index & 0xff;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ vesa->cmap_write(vesa, i8, red, green, blue);
+}
+
+static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
{
struct drm_device *dev = &vesa->sysfb.dev;
- size_t i;
- u16 r16, g16, b16;
+ struct drm_crtc *crtc = &vesa->crtc;
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- for (i = 0; i < 32; ++i) {
- r16 = i * 8 + i / 4;
- r16 |= (r16 << 8) | r16;
- vesa->cmap_write(vesa, i, r16, r16, r16);
- }
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB565:
- for (i = 0; i < 32; ++i) {
- r16 = i * 8 + i / 4;
- r16 |= (r16 << 8) | r16;
- g16 = i * 4 + i / 16;
- g16 |= (g16 << 8) | g16;
- b16 = r16;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
- for (i = 32; i < 64; ++i) {
- g16 = i * 4 + i / 16;
- g16 |= (g16 << 8) | g16;
- vesa->cmap_write(vesa, i, 0, g16, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < 256; ++i) {
- r16 = (i << 8) | i;
- vesa->cmap_write(vesa, i, r16, r16, r16);
- }
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -180,38 +166,24 @@ static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
}
}
-static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
struct drm_device *dev = &vesa->sysfb.dev;
- size_t i;
- u16 r16, g16, b16;
+ struct drm_crtc *crtc = &vesa->crtc;
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- for (i = 0; i < 32; ++i) {
- r16 = lut[i * 8 + i / 4].red;
- g16 = lut[i * 8 + i / 4].green;
- b16 = lut[i * 8 + i / 4].blue;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB565:
- for (i = 0; i < 32; ++i) {
- r16 = lut[i * 8 + i / 4].red;
- g16 = lut[i * 4 + i / 16].green;
- b16 = lut[i * 8 + i / 4].blue;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
- for (i = 32; i < 64; ++i)
- vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < 256; ++i)
- vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -253,13 +225,13 @@ static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (sysfb_crtc_state->format == sysfb->fb_format) {
if (crtc_state->gamma_lut)
- vesadrm_set_gamma_lut(vesa,
- sysfb_crtc_state->format,
- crtc_state->gamma_lut->data);
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
} else {
- vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
}
}
}
@@ -377,7 +349,7 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
drm_warn(dev, "hardware palette is unchangeable, colors may be incorrect\n");
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
if (drm_edid_header_is_valid(edid_info.dummy) == 8)
sysfb->edid = edid_info.dummy;
#endif
@@ -435,8 +407,8 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- vesa->formats, ARRAY_SIZE(vesa->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
primary_plane = &vesa->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 0b65e69f3a8a..1dd3670f37db 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -185,11 +185,13 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling);
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes);
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd);
#ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 46170753699d..dd041089f797 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -102,6 +102,7 @@ static const struct drm_framebuffer_funcs tegra_fb_funcs = {
};
struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes)
@@ -114,7 +115,7 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(drm, fb, info, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++)
fb->obj[i] = &planes[i]->gem;
@@ -132,9 +133,9 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *cmd)
{
- const struct drm_format_info *info = drm_get_format_info(drm, cmd);
struct tegra_bo *planes[4];
struct drm_gem_object *gem;
struct drm_framebuffer *fb;
@@ -166,7 +167,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
planes[i] = to_tegra_bo(gem);
}
- fb = tegra_fb_alloc(drm, cmd, planes, i);
+ fb = tegra_fb_alloc(drm, info, cmd, planes, i);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
goto unreference;
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index cd9d798f8870..1b70f5e164af 100644
--- a/drivers/gpu/drm/tegra/fbdev.c
+++ b/drivers/gpu/drm/tegra/fbdev.c
@@ -106,7 +106,9 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(info);
}
- fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+ fb = tegra_fb_alloc(drm,
+ drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]),
+ &cmd, &bo, 1);
if (IS_ERR(fb)) {
err = PTR_ERR(fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index dbc1394f96b8..41a285ec889f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -523,10 +523,10 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
if (tegra->domain) {
tegra_bo_iommu_unmap(tegra, bo);
- if (gem->import_attach) {
+ if (drm_gem_is_imported(gem)) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
- dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+ dma_buf_detach(gem->dma_buf, gem->import_attach);
}
}
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 3afd6587df08..c0e952293ad0 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o
+ drm_rect_test.o \
+ drm_sysfb_modeset_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
index ff88ec2e911c..887020141c7f 100644
--- a/drivers/gpu/drm/tests/drm_bridge_test.c
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -8,36 +8,64 @@
#include <drm/drm_bridge_helper.h>
#include <drm/drm_kunit_helpers.h>
+#include <kunit/device.h>
#include <kunit/test.h>
+/*
+ * Mimic the typical "private" struct defined by a bridge driver, which
+ * embeds a bridge plus other fields.
+ *
+ * Having at least one member before @bridge ensures we test a non-zero
+ * @bridge offset.
+ */
+struct drm_bridge_priv {
+ unsigned int enable_count;
+ unsigned int disable_count;
+ struct drm_bridge bridge;
+ void *data;
+};
+
struct drm_bridge_init_priv {
struct drm_device drm;
+ /** @dev: device, only for tests not needing a whole drm_device */
+ struct device *dev;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_encoder encoder;
- struct drm_bridge bridge;
+ struct drm_bridge_priv *test_bridge;
struct drm_connector *connector;
- unsigned int enable_count;
- unsigned int disable_count;
+ bool destroyed;
};
+static struct drm_bridge_priv *bridge_to_priv(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct drm_bridge_priv, bridge);
+}
+
+static void drm_test_bridge_priv_destroy(struct drm_bridge *bridge)
+{
+ struct drm_bridge_priv *bridge_priv = bridge_to_priv(bridge);
+ struct drm_bridge_init_priv *priv = (struct drm_bridge_init_priv *)bridge_priv->data;
+
+ priv->destroyed = true;
+}
+
static void drm_test_bridge_enable(struct drm_bridge *bridge)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->enable_count++;
}
static void drm_test_bridge_disable(struct drm_bridge *bridge)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->disable_count++;
}
static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
.enable = drm_test_bridge_enable,
.disable = drm_test_bridge_disable,
};
@@ -45,8 +73,7 @@ static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->enable_count++;
}
@@ -54,13 +81,13 @@ static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->disable_count++;
}
static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
.atomic_enable = drm_test_bridge_atomic_enable,
.atomic_disable = drm_test_bridge_atomic_disable,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -102,6 +129,12 @@ drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
if (IS_ERR(priv))
return ERR_CAST(priv);
+ priv->test_bridge = devm_drm_bridge_alloc(dev, struct drm_bridge_priv, bridge, funcs);
+ if (IS_ERR(priv->test_bridge))
+ return ERR_CAST(priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
drm = &priv->drm;
priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
NULL,
@@ -125,9 +158,8 @@ drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
- bridge->funcs = funcs;
ret = drm_kunit_bridge_add(test, bridge);
if (ret)
@@ -173,7 +205,7 @@ static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
retry_commit:
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
bridge_state = drm_atomic_get_bridge_state(state, bridge);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
@@ -228,7 +260,7 @@ static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
* locking. The function would return NULL in all cases anyway,
* so we don't really have any concurrency to worry about.
*/
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
}
@@ -253,7 +285,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
@@ -279,14 +311,14 @@ retry_commit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -296,8 +328,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
}
/*
@@ -309,7 +341,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
@@ -318,14 +350,14 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *
mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -335,8 +367,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 0);
}
/*
@@ -348,7 +380,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
@@ -374,14 +406,14 @@ retry_commit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -391,8 +423,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
}
static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
@@ -407,11 +439,83 @@ static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
.test_cases = drm_bridge_helper_reset_crtc_tests,
};
+static int drm_test_bridge_alloc_init(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ priv->dev = kunit_device_register(test, "drm-bridge-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ test->priv = priv;
+
+ priv->test_bridge = devm_drm_bridge_alloc(priv->dev, struct drm_bridge_priv, bridge,
+ &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ return 0;
+}
+
+/*
+ * Test that a bridge is freed when the device is destroyed, in the
+ * absence of any other drm_bridge_get()/drm_bridge_put() operations.
+ */
+static void drm_test_drm_bridge_alloc_basic(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+/*
+ * Test that a bridge is not freed when the device is destroyed while there
+ * is still a reference to it, and freed when that reference is put.
+ */
+static void drm_test_drm_bridge_alloc_get_put(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ drm_bridge_get(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ drm_bridge_put(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
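
The two tests above pin down the refcounted lifetime introduced by devm_drm_bridge_alloc(): the allocation's reference is dropped by devm at device teardown, but the object survives while any consumer still holds a reference. A sketch of the pattern being verified (error handling omitted; the funcs table is assumed to provide .destroy):

/* Allocation ties the initial reference to devm cleanup. */
bridge_priv = devm_drm_bridge_alloc(dev, struct drm_bridge_priv, bridge,
				    &funcs);

/* A consumer takes its own reference... */
drm_bridge_get(&bridge_priv->bridge);

/* ...so the bridge outlives device removal until the final put,
 * at which point funcs->destroy runs.
 */
drm_bridge_put(&bridge_priv->bridge);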
+
+static struct kunit_case drm_bridge_alloc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_alloc_basic),
+ KUNIT_CASE(drm_test_drm_bridge_alloc_get_put),
+ { }
+};
+
+static struct kunit_suite drm_bridge_alloc_test_suite = {
+ .name = "drm_bridge_alloc",
+ .init = drm_test_bridge_alloc_init,
+ .test_cases = drm_bridge_alloc_tests,
+};
+
kunit_test_suites(
&drm_bridge_get_current_state_test_suite,
&drm_bridge_helper_reset_crtc_test_suite,
+ &drm_bridge_alloc_test_suite,
);
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
+
MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 35cd3405d045..7299fa8971ce 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -279,9 +279,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = {
0xFF, 0x00,
- 0x4C, 0x99,
- 0x19, 0x66,
- 0xE5, 0xB2,
+ 0x4C, 0x95,
+ 0x1C, 0x69,
+ 0xE2, 0xB2,
},
},
.rgb332_result = {
@@ -430,9 +430,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.gray8_result = {
.dst_pitch = 5,
.expected = {
- 0x3C, 0x33, 0xC4, 0x00, 0x00,
- 0xBB, 0x3C, 0x33, 0x00, 0x00,
- 0x34, 0xBB, 0x3C, 0x00, 0x00,
+ 0x3D, 0x32, 0xC1, 0x00, 0x00,
+ 0xBA, 0x3D, 0x32, 0x00, 0x00,
+ 0x34, 0xBA, 0x3D, 0x00, 0x00,
},
},
.rgb332_result = {
@@ -735,27 +735,22 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
NULL : &result->dst_pitch;
drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
- &fmtcnv_state, false);
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
- drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip,
- &fmtcnv_state, true);
+ drm_fb_xrgb8888_to_rgb565be(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -795,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -842,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -889,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -939,12 +916,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -985,12 +957,8 @@ static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1030,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1077,12 +1039,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1122,14 +1079,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1202,23 +1153,15 @@ static void drm_test_fb_swab(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
- &src, &fb, &params->clip, &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
@@ -1229,11 +1172,8 @@ static void drm_test_fb_swab(struct kunit *test)
mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
fb.format = &mock_format;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1266,14 +1206,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1306,14 +1240,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1407,147 +1335,6 @@ static void drm_test_fb_clip_offset(struct kunit *test)
KUNIT_EXPECT_EQ(test, offset, params->expected_offset);
}
-struct fb_build_fourcc_list_case {
- const char *name;
- u32 native_fourccs[TEST_BUF_SIZE];
- size_t native_fourccs_size;
- u32 expected[TEST_BUF_SIZE];
- size_t expected_fourccs_size;
-};
-
-static struct fb_build_fourcc_list_case fb_build_fourcc_list_cases[] = {
- {
- .name = "no native formats",
- .native_fourccs = { },
- .native_fourccs_size = 0,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "XRGB8888 as native format",
- .native_fourccs = { DRM_FORMAT_XRGB8888 },
- .native_fourccs_size = 1,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "remove duplicates",
- .native_fourccs = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- },
- .native_fourccs_size = 11,
- .expected = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- },
- .expected_fourccs_size = 3,
- },
- {
- .name = "convert alpha formats",
- .native_fourccs = {
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_RGBA1010102,
- DRM_FORMAT_BGRA1010102,
- },
- .native_fourccs_size = 12,
- .expected = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_RGBX1010102,
- DRM_FORMAT_BGRX1010102,
- },
- .expected_fourccs_size = 12,
- },
- {
- .name = "random formats",
- .native_fourccs = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- },
- .native_fourccs_size = 10,
- .expected = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XRGB8888,
- },
- .expected_fourccs_size = 10,
- },
-};
-
-static void fb_build_fourcc_list_case_desc(struct fb_build_fourcc_list_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(fb_build_fourcc_list, fb_build_fourcc_list_cases, fb_build_fourcc_list_case_desc);
-
-static void drm_test_fb_build_fourcc_list(struct kunit *test)
-{
- const struct fb_build_fourcc_list_case *params = test->param_value;
- u32 fourccs_out[TEST_BUF_SIZE] = {0};
- size_t nfourccs_out;
- struct drm_device *drm;
- struct device *dev;
-
- dev = drm_kunit_helper_alloc_device(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
-
- drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
-
- nfourccs_out = drm_fb_build_fourcc_list(drm, params->native_fourccs,
- params->native_fourccs_size,
- fourccs_out, TEST_BUF_SIZE);
-
- KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
- KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
-}
-
struct fb_memcpy_case {
const char *name;
u32 format;
@@ -1910,12 +1697,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
memset(buf[i], 0, dst_size[i]);
}
- int blit_result;
-
- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_memcpy(dst, dst_pitches, src, &fb, &params->clip);
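+	/* drm_fb_memcpy() returns void, so only the destination buffers are checked */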
- KUNIT_EXPECT_FALSE(test, blit_result);
for (size_t i = 0; i < fb.format->num_planes; i++) {
expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
@@ -1940,7 +1723,6 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xbgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_abgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_clip_offset, clip_offset_gen_params),
- KUNIT_CASE_PARAM(drm_test_fb_build_fourcc_list, fb_build_fourcc_list_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_memcpy, fb_memcpy_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index 6ea04cc8f324..9b8e01e8cd91 100644
--- a/drivers/gpu/drm/tests/drm_framebuffer_test.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -363,6 +363,7 @@ struct drm_framebuffer_test_priv {
static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 7ffd666753b1..8bd412735000 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -33,7 +33,7 @@ struct drm_atomic_helper_connector_hdmi_priv {
struct drm_encoder encoder;
struct drm_connector connector;
- const char *current_edid;
+ const void *current_edid;
size_t current_edid_len;
};
@@ -56,7 +56,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
}
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
- const char *edid, size_t edid_len)
+ const void *edid, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
connector_to_priv(connector);
@@ -89,15 +89,15 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
};
static enum drm_mode_status
-reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+reject_100mhz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
const struct drm_display_mode *mode,
unsigned long long tmds_rate)
{
return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
}
-static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
- .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+static const struct drm_connector_hdmi_funcs reject_100mhz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100mhz_connector_tmds_char_rate_valid,
};
static int dummy_connector_get_modes(struct drm_connector *connector)
@@ -140,10 +140,11 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc,
- const struct drm_connector_hdmi_funcs *hdmi_funcs)
+__connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs,
+ const void *edid_data, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -182,6 +183,8 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
conn = &priv->connector;
+ conn->ycbcr_420_allowed = !!(formats & BIT(HDMI_COLORSPACE_YUV420));
+
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
@@ -197,29 +200,28 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
drm_mode_config_reset(drm);
+ if (edid_data && edid_len) {
+ ret = set_connector_edid(test, &priv->connector, edid_data, edid_len);
+ KUNIT_ASSERT_GT(test, ret, 0);
+ }
+
return priv;
}
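+/*
+ * Note: this is a macro rather than a function so that ARRAY_SIZE() can be
+ * applied to the EDID argument, which must therefore be an actual array
+ * rather than a pointer.
+ */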
+#define drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test, formats, max_bpc, funcs, edid) \
+ __connector_hdmi_init(test, formats, max_bpc, funcs, edid, ARRAY_SIZE(edid))
+
static
struct drm_atomic_helper_connector_hdmi_priv *
drm_kunit_helper_connector_hdmi_init(struct kunit *test,
unsigned int formats,
unsigned int max_bpc)
{
- struct drm_atomic_helper_connector_hdmi_priv *priv;
- int ret;
-
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- formats, max_bpc,
- &dummy_connector_hdmi_funcs);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
-
- ret = set_connector_edid(test, &priv->connector,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
- return priv;
+ return drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ formats,
+ max_bpc,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
}
/*
@@ -414,7 +416,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
@@ -474,7 +476,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
@@ -533,7 +535,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -595,7 +597,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -658,7 +660,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -720,7 +722,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -734,6 +736,107 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
}
/*
+ * Test that for an HDMI connector with an HDMI monitor, we always
+ * get a limited RGB Quantization Range with a YUV420 mode,
+ * regardless of the value of the Broadcast RGB property.
+ */
+static void drm_test_check_broadcast_rgb_cea_mode_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ broadcast_rgb = *(enum drm_hdmi_broadcast_rgb *)test->param_value;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+
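+	/* VIC 95 (3840x2160@30) is supported in YUV420 only by this EDID */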
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_conn_state:
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (PTR_ERR(conn_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ conn_state->hdmi.broadcast_rgb = broadcast_rgb;
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.broadcast_rgb, broadcast_rgb);
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+
+ KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static const enum drm_hdmi_broadcast_rgb check_broadcast_rgb_cea_mode_yuv420_tests[] = {
+ DRM_HDMI_BROADCAST_RGB_AUTO,
+ DRM_HDMI_BROADCAST_RGB_FULL,
+ DRM_HDMI_BROADCAST_RGB_LIMITED,
+};
+
+static void
+check_broadcast_rgb_cea_mode_yuv420_desc(const enum drm_hdmi_broadcast_rgb *broadcast_rgb,
+ char *desc)
+{
+	strscpy(desc, drm_hdmi_connector_get_broadcast_rgb_name(*broadcast_rgb),
+		KUNIT_PARAM_DESC_SIZE);
+}
+
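+/*
+ * KUNIT_ARRAY_PARAM() defines check_broadcast_rgb_cea_mode_yuv420_gen_params(),
+ * which supplies the parameters for the test case registered below.
+ */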
+KUNIT_ARRAY_PARAM(check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_tests,
+ check_broadcast_rgb_cea_mode_yuv420_desc);
+
+/*
* Test that if we change the maximum bpc property to a different value,
* we trigger a mode change on the connector's CRTC, which will in turn
* disable/enable the connector.
@@ -752,19 +855,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -831,19 +931,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -905,21 +1002,18 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_dvi_1080p);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_dvi_1080p,
- ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
@@ -959,19 +1053,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1011,19 +1102,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1063,19 +1151,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1168,7 +1253,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
* Then we will pick the latter, and the computed TMDS character rate
* will be equal to 1.25 times the mode pixel clock.
*/
-static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1181,19 +1266,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1229,6 +1311,80 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
/*
* Test that if:
+ * - We have an HDMI connector and a display supporting both RGB and YUV420
+ * - The chosen mode can be supported in YUV420 output format only
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in YUV420/12bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/10bpc.
+ *
+ * Then we will pick the latter, and the computed TMDS character rate
+ * will be equal to 1.25 * 0.5 times the mode pixel clock.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *yuv420_only_mode;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
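+	/*
+	 * VIC 95 has a 297 MHz pixel clock and YUV420 halves the TMDS
+	 * character rate: 12 bpc needs 297 * 1.5 / 2 = 222.75 MHz, above
+	 * the EDID's 200 MHz limit, while 10 bpc needs 297 * 1.25 / 2 =
+	 * 185.625 MHz, below it.
+	 */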
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 10, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ yuv420_only_mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
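+	/*
+	 * mode->clock is in kHz; 10 bpc scales the rate by 1.25 and YUV420
+	 * halves it, hence the expected clock * 1000 * 0.625 = clock * 625 Hz.
+	 */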
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, yuv420_only_mode->clock * 625);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if:
* - We have an HDMI connector supporting both RGB and YUV422 and up to
* 12 bpc
* - The chosen mode has a TMDS character rate higher than the display
@@ -1240,7 +1396,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
* Then we will prefer to keep the RGB format with a lower bpc over
* picking YUV422.
*/
-static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1253,21 +1409,18 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1304,6 +1457,170 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
}
/*
+ * Test that if:
+ * - We have an HDMI connector supporting both RGB and YUV420 and up to
+ * 12 bpc
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in RGB/10bpc but lower than the display supports in
+ * RGB/8bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/12bpc.
+ *
+ * Then we will prefer to keep the RGB format with a lower bpc over
+ * picking YUV420.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_also(info, preferred));
+
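+	/*
+	 * The preferred mode is 3840x2160@30 with a 297 MHz pixel clock and
+	 * the EDID limit is 340 MHz: RGB/8 needs 297 MHz, RGB/10 needs
+	 * 371.25 MHz and YUV420/12 needs 222.75 MHz.
+	 */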
+ rate = drm_hdmi_compute_mode_clock(preferred, 8, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if a driver supports only RGB, but the screen supports the
+ * chosen mode only in the YUV420 output format, all format fallback
+ * attempts fail and the atomic check rejects the mode.
+ */
+static void drm_test_check_driver_unsupported_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred, *yuv420_only_mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_FALSE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, drm_mode_is_420_also(info, preferred));
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_crtc_state:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (PTR_ERR(crtc_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, yuv420_only_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
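+	/* the driver is RGB-only, so the YUV420-only mode must fail the check */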
+ KUNIT_ASSERT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
* Test that if a driver and screen supports RGB and YUV formats, and we
* try to set the VIC 1 mode, we end up with 8bpc RGB even if we could
* have had a higher bpc.
@@ -1321,20 +1638,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1388,19 +1702,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1458,21 +1769,18 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1531,19 +1839,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1594,21 +1899,18 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1704,17 +2006,17 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_full_cea_mode_vic_1),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode_vic_1),
- /*
- * TODO: When we'll have YUV output support, we need to check
- * that the limited range is always set to limited no matter
- * what the value of Broadcast RGB is.
- */
+ KUNIT_CASE_PARAM(drm_test_check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_gen_params),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
- KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
- KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_rgb),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_yuv420),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420),
+ KUNIT_CASE(drm_test_check_driver_unsupported_fallback_yuv420),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_changed),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_output_bpc_dvi),
@@ -1927,28 +2229,20 @@ static void drm_test_check_mode_valid(struct kunit *test)
static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_100_MHz_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100mhz_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
- conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
/*
	 * Unlike in drm_test_check_mode_valid(), here 1080p is rejected, but
	 * 480p is allowed.
*/
- preferred = find_preferred_mode(conn);
+ preferred = find_preferred_mode(&priv->connector);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
@@ -1966,12 +2260,14 @@ static void drm_test_check_mode_valid_reject(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
+ unsigned char no_edid[] = {};
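+	/* a zero-sized EDID makes __connector_hdmi_init() skip set_connector_edid() */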
int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs,
+ no_edid);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1996,20 +2292,15 @@ static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_100mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_100mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
preferred = find_preferred_mode(conn);
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 6358397a5d7a..c59c8528a3f7 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -46,6 +46,13 @@
* Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz
* Dummy Descriptor:
* Checksum: 0xab
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_dvi_1080p[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -62,6 +69,10 @@ static const unsigned char test_edid_dvi_1080p[] = {
};
/*
+ *
+ * This EDID is intentionally non-conformant: its 100 MHz maximum TMDS
+ * clock is lower than what its advertised 1080p timing requires. It is
+ * meant to be used only by tests exercising unusual situations.
+ *
* edid-decode (hex):
*
* 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00
@@ -73,14 +84,14 @@ static const unsigned char test_edid_dvi_1080p[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 14 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10
*
* ----------------
*
@@ -135,8 +146,19 @@ static const unsigned char test_edid_dvi_1080p[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 100 MHz
- * Extended HDMI video details:
- * Checksum: 0xe4 Unused space in Extension Block: 100 bytes
+ * Checksum: 0x10 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * Failures:
+ *
+ * EDID:
+ * CTA-861: The maximum HDMI TMDS clock is 100000 kHz, but one or more video timings go up to 148500 kHz.
+ *
+ * EDID conformity: FAIL
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -147,11 +169,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -160,7 +182,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xe4
+ 0x00, 0x00, 0x00, 0x10
};
/*
@@ -175,14 +197,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 28 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
*
* ----------------
*
@@ -237,8 +259,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 200 MHz
- * Extended HDMI video details:
- * Checksum: 0xd0 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xfc Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -249,11 +277,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -262,7 +290,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0
+ 0x00, 0x00, 0x00, 0xfc
};
/*
@@ -277,14 +305,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
- * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
- * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c
+ * 00 12 34 00 44 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e0
*
* ----------------
*
@@ -339,8 +367,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = {
* Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
* Source physical address: 1.2.3.4
* Maximum TMDS clock: 340 MHz
- * Extended HDMI video details:
- * Checksum: 0xd0 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xe0 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -351,11 +385,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
- 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x00, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -364,7 +398,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0
+ 0x00, 0x00, 0x00, 0xe0
};
/*
@@ -379,14 +413,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7a
*
- * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c
- * 00 12 34 78 28 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c
+ * 00 12 34 78 28 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a8
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d4
*
* ----------------
*
@@ -447,8 +481,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = {
* DC_30bit
* DC_Y444
* Maximum TMDS clock: 200 MHz
- * Extended HDMI video details:
- * Checksum: 0xa8 Unused space in Extension Block: 100 bytes
+ * Checksum: 0xd4 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -461,9 +501,9 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x1b, 0xb1,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x78, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x15, 0xb1,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x78, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -472,7 +512,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xa8
+ 0x00, 0x00, 0x00, 0xd4
};
/*
@@ -487,14 +527,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
* 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 8a
*
- * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c
- * 00 12 34 78 44 20 00 00 00 00 00 00 00 00 00 00
+ * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c
+ * 00 12 34 78 44 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
- * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 8c
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 b8
*
* ----------------
*
@@ -555,8 +595,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = {
* DC_30bit
* DC_Y444
* Maximum TMDS clock: 340 MHz
- * Extended HDMI video details:
- * Checksum: 0x8c Unused space in Extension Block: 100 bytes
+ * Checksum: 0xb8 Unused space in Extension Block: 106 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.30.0-5367
+ * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22
+ *
+ * EDID conformity: PASS
*/
static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
@@ -569,18 +615,250 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x1b, 0xb1,
- 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c,
- 0x00, 0x12, 0x34, 0x78, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x15, 0xb1,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x78, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xb8
+};
+
+/*
+ * Max resolution:
+ * - 1920x1080@60Hz with RGB, YUV444, YUV422
+ * - 3840x2160@30Hz with YUV420 only
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 200 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 c0 1c 32 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 11 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 bb
+ *
+ * 02 03 29 31 42 90 5f 6c 03 0c 00 10 00 78 28 20
+ * 00 00 01 03 6d d8 5d c4 01 28 80 07 00 00 00 00
+ * 00 00 e3 0f 00 00 e2 0e 5f 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ca
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (960 mm x 540 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 170 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xbb
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 200 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 200 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * Empty Capability Map
+ * YCbCr 4:2:0 Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Checksum: 0xca
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xbb, 0x02, 0x03, 0x29, 0x31,
+ 0x42, 0x90, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x28, 0x20,
+ 0x00, 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x28, 0x80, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x00, 0x00, 0xe2, 0x0e,
+ 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xca
+};
+
+/*
+ * Max resolution: 3840x2160@30Hz with RGB, YUV444, YUV422, YUV420
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 340 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 04 74 00 30 f2 70 5a 80 b0 58
+ * 8a 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 22 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ce
+ *
+ * 02 03 27 31 41 5f 6c 03 0c 00 10 00 78 44 20 00
+ * 00 01 03 6d d8 5d c4 01 44 80 07 00 00 00 00 00
+ * 00 e3 0f 01 00 e1 0e 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 84
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz (1600 mm x 900 mm)
+ * Hfront 176 Hsync 88 Hback 296 Hpol P
+ * Vfront 8 Vsync 10 Vback 72 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 340 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xce
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 340 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 340 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * YCbCr 4:2:0 Video Data Block:
+ * Checksum: 0x84
+ */
+static const unsigned char test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x74, 0x00, 0x30, 0xf2, 0x70,
+ 0x5a, 0x80, 0xb0, 0x58, 0x8a, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x22, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0x02, 0x03, 0x27, 0x31,
+ 0x41, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x44, 0x20, 0x00,
+ 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x44, 0x80, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x01, 0x00, 0xe1, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x8c
+ 0x00, 0x00, 0x00, 0x84
};
#endif // DRM_KUNIT_EDID_H_
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 5f7257840d8e..04edb6079c0d 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -13,6 +13,7 @@
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
diff --git a/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
new file mode 100644
index 000000000000..e875d876118f
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "../sysfb/drm_sysfb_helper.h"
+
+#define TEST_BUF_SIZE 50
+
+struct sysfb_build_fourcc_list_case {
+ const char *name;
+ u32 native_fourccs[TEST_BUF_SIZE];
+ size_t native_fourccs_size;
+ u32 expected[TEST_BUF_SIZE];
+ size_t expected_fourccs_size;
+};
+
+static struct sysfb_build_fourcc_list_case sysfb_build_fourcc_list_cases[] = {
+ {
+ .name = "no native formats",
+ .native_fourccs = { },
+ .native_fourccs_size = 0,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "XRGB8888 as native format",
+ .native_fourccs = { DRM_FORMAT_XRGB8888 },
+ .native_fourccs_size = 1,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "remove duplicates",
+ .native_fourccs = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ },
+ .native_fourccs_size = 11,
+ .expected = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ },
+ .expected_fourccs_size = 3,
+ },
+ {
+ .name = "convert alpha formats",
+ .native_fourccs = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_BGRA1010102,
+ },
+ .native_fourccs_size = 12,
+ .expected = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_BGRX1010102,
+ },
+ .expected_fourccs_size = 12,
+ },
+ {
+ .name = "random formats",
+ .native_fourccs = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ },
+ .native_fourccs_size = 10,
+ .expected = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XRGB8888,
+ },
+ .expected_fourccs_size = 10,
+ },
+};
+
+static void sysfb_build_fourcc_list_case_desc(struct sysfb_build_fourcc_list_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(sysfb_build_fourcc_list, sysfb_build_fourcc_list_cases,
+ sysfb_build_fourcc_list_case_desc);
+
+static void drm_test_sysfb_build_fourcc_list(struct kunit *test)
+{
+ const struct sysfb_build_fourcc_list_case *params = test->param_value;
+ u32 fourccs_out[TEST_BUF_SIZE] = {0};
+ size_t nfourccs_out;
+ struct drm_device *drm;
+ struct device *dev;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+
+ nfourccs_out = drm_sysfb_build_fourcc_list(drm, params->native_fourccs,
+ params->native_fourccs_size,
+ fourccs_out, TEST_BUF_SIZE);
+
+ KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
+ KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
+}
+
+static struct kunit_case drm_sysfb_modeset_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_sysfb_build_fourcc_list, sysfb_build_fourcc_list_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_sysfb_modeset_test_suite = {
+ .name = "drm_sysfb_modeset_test",
+ .test_cases = drm_sysfb_modeset_test_cases,
+};
+
+kunit_test_suite(drm_sysfb_modeset_test_suite);
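+
+/*
+ * Illustrative usage (assumes the standard KUnit tooling; exact paths may
+ * vary): the suite can be run with
+ *   ./tools/testing/kunit/kunit.py run \
+ *       --kunitconfig=drivers/gpu/drm/tests drm_sysfb_modeset_test
+ */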
+
+MODULE_DESCRIPTION("KUnit tests for the drm_sysfb_modeset APIs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
index 312645271014..b6d6becf1683 100644
--- a/drivers/gpu/drm/tidss/Makefile
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -7,6 +7,7 @@ tidss-y := tidss_crtc.o \
tidss_irq.o \
tidss_plane.o \
tidss_scale_coefs.o \
- tidss_dispc.o
+ tidss_dispc.o \
+ tidss_oldi.o
obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 94f8e3178df5..a2f40a5c7703 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -130,7 +130,7 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss,
!to_tidss_crtc_state(cstate)->plane_pos_changed)
return;
- for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ for (layer = 0; layer < tidss->feat->num_vids; layer++) {
struct drm_plane_state *pstate;
struct drm_plane *plane;
bool layer_active = false;
@@ -271,7 +271,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
* another videoport, the DSS will report sync lost issues. Disable all
* the layers here as a work-around.
*/
- for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
+ for (u32 layer = 0; layer < tidss->feat->num_vids; layer++)
dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
false);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index cacb5f3d8085..c0277fa36425 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -103,9 +103,16 @@ const struct dispc_features dispc_k2g_feats = {
},
},
- .num_planes = 1,
- .vid_name = { "vid1" },
- .vid_lite = { false },
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ },
+
.vid_order = { 0 },
};
@@ -139,7 +146,7 @@ static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
const struct dispc_features dispc_am65x_feats = {
.max_pclk_khz = {
[DISPC_VP_DPI] = 165000,
- [DISPC_VP_OLDI] = 165000,
+ [DISPC_VP_OLDI_AM65X] = 165000,
},
.scaling = {
@@ -169,7 +176,7 @@ const struct dispc_features dispc_am65x_feats = {
.vp_name = { "vp1", "vp2" },
.ovr_name = { "ovr1", "ovr2" },
.vpclk_name = { "vp1", "vp2" },
- .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+ .vp_bus_type = { DISPC_VP_OLDI_AM65X, DISPC_VP_DPI },
.vp_feat = { .color = {
.has_ctm = true,
@@ -178,11 +185,22 @@ const struct dispc_features dispc_am65x_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ },
+
+ .vid_order = { 1, 0 },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -267,9 +285,32 @@ const struct dispc_features dispc_j721e_feats = {
.gamma_type = TIDSS_GAMMA_10BIT,
},
},
- .num_planes = 4,
- .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
- .vid_lite = { 0, 1, 0, 1, },
+
+ .num_vids = 4,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ {
+ .name = "vid2",
+ .is_lite = false,
+ .hw_id = 2,
+ },
+ {
+ .name = "vidl2",
+ .is_lite = true,
+ .hw_id = 3,
+ },
+ },
+
.vid_order = { 1, 3, 0, 2 },
};
@@ -315,11 +356,23 @@ const struct dispc_features dispc_am625_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
+
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 1, 0 },
};
const struct dispc_features dispc_am62a7_feats = {
@@ -369,11 +422,58 @@ const struct dispc_features dispc_am62a7_feats = {
},
},
- .num_planes = 2,
- /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .num_vids = 2,
+
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 1, 0 },
+};
+
+const struct dispc_features dispc_am62l_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .subrev = DISPC_AM62L,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 0 },
};
static const u16 *dispc_common_regmap;
@@ -391,7 +491,7 @@ struct dispc_device {
void __iomem *base_ovr[TIDSS_MAX_PORTS];
void __iomem *base_vp[TIDSS_MAX_PORTS];
- struct regmap *oldi_io_ctrl;
+ struct regmap *am65x_oldi_io_ctrl;
struct clk *vp_clk[TIDSS_MAX_PORTS];
@@ -466,6 +566,29 @@ static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
return ioread32(base + reg);
}
+int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport,
+ u32 oldi_cfg)
+{
+ u32 count = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+
+ dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
+ while (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport)
+{
+ dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+}
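+
+/*
+ * Note: these two helpers form the DISPC side of the OLDI bridge in
+ * tidss_oldi.c; tidss_configure_oldi() is called from the bridge's
+ * atomic_pre_enable path and tidss_disable_oldi() from its
+ * atomic_post_disable path.
+ */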
+
/*
* TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -734,7 +857,8 @@ static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -742,9 +866,10 @@ static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_id), stat);
}
static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
@@ -766,7 +891,8 @@ static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -774,9 +900,10 @@ static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_id), stat);
}
static
@@ -788,7 +915,8 @@ void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
}
@@ -809,7 +937,7 @@ dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
status |= dispc_k3_vp_read_irqstatus(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
status |= dispc_k3_vid_read_irqstatus(dispc, i);
dispc_k3_clear_irqstatus(dispc, status);
@@ -825,7 +953,7 @@ static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
enable |= dispc_k3_vp_read_irqenable(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
enable |= dispc_k3_vid_read_irqenable(dispc, i);
return enable;
@@ -851,12 +979,15 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
main_disable |= BIT(i); /* VP IRQ */
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
+ u32 hw_id = dispc->feat->vid_info[i].hw_id;
+
dispc_k3_vid_set_irqenable(dispc, i, mask);
+
if (mask & DSS_IRQ_PLANE_MASK(i))
- main_enable |= BIT(i + 4); /* VID IRQ */
+ main_enable |= BIT(hw_id + 4); /* VID IRQ */
else
- main_disable |= BIT(i + 4); /* VID IRQ */
+ main_disable |= BIT(hw_id + 4); /* VID IRQ */
}
if (main_enable)
@@ -879,6 +1010,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -896,6 +1028,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -906,13 +1039,11 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
}
}
-enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
-
struct dispc_bus_format {
u32 bus_fmt;
u32 data_width;
bool is_oldi_fmt;
- enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+ enum oldi_mode_reg_val am65x_oldi_mode_reg_val;
};
static const struct dispc_bus_format dispc_bus_formats[] = {
@@ -956,7 +1087,7 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
return -EINVAL;
}
- if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X &&
fmt->is_oldi_fmt) {
dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
__func__, dispc->feat->vp_name[hw_videoport]);
@@ -966,23 +1097,23 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
return 0;
}
-static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+static void dispc_am65x_oldi_tx_power(struct dispc_device *dispc, bool power)
{
- u32 val = power ? 0 : OLDI_PWRDN_TX;
+ u32 val = power ? 0 : AM65X_OLDI_PWRDN_TX;
- if (WARN_ON(!dispc->oldi_io_ctrl))
+ if (WARN_ON(!dispc->am65x_oldi_io_ctrl))
return;
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
- OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT0_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT1_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT2_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT3_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_CLK_IO_CTRL,
+ AM65X_OLDI_PWRDN_TX, val);
}
static void dispc_set_num_datalines(struct dispc_device *dispc,
@@ -1011,8 +1142,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc,
VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
}
-static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
- const struct dispc_bus_format *fmt)
+static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
{
u32 oldi_cfg = 0;
u32 oldi_reset_bit = BIT(5 + hw_videoport);
@@ -1031,7 +1162,7 @@ static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
oldi_cfg |= BIT(7); /* DEPOL */
- oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1);
oldi_cfg |= BIT(12); /* SOFTRST */
@@ -1060,10 +1191,10 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
if (WARN_ON(!fmt))
return;
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
- dispc_oldi_tx_power(dispc, true);
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) {
+ dispc_am65x_oldi_tx_power(dispc, true);
- dispc_enable_oldi(dispc, hw_videoport, fmt);
+ dispc_enable_am65x_oldi(dispc, hw_videoport, fmt);
}
}
@@ -1119,7 +1250,7 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
align = true;
/* always use DE_HIGH for OLDI */
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X)
ieo = false;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
@@ -1145,10 +1276,10 @@ void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
{
- if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) {
dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
- dispc_oldi_tx_power(dispc, false);
+ dispc_am65x_oldi_tx_power(dispc, false);
}
}
@@ -1310,7 +1441,6 @@ void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
* Calculate the percentage difference between the requested pixel clock rate
* and the effective rate resulting from calculating the clock divider value.
*/
-static
unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
{
int r = rate / 100, rr = real_rate / 100;
@@ -1358,8 +1488,10 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
x, 17, 6);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
@@ -1370,8 +1502,10 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
x, 13, 0);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
@@ -1388,6 +1522,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2025,7 +2160,7 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
bool need_scaling = state->src_w >> 16 != state->crtc_w ||
state->src_h >> 16 != state->crtc_h;
@@ -2096,7 +2231,7 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
u16 cpp = state->fb->format->cpp[0];
u32 fb_width = state->fb->pitches[0] / cpp;
@@ -2210,7 +2345,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2226,7 +2361,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2265,7 +2400,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2281,7 +2416,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2308,6 +2443,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2416,6 +2552,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2735,15 +2872,15 @@ static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
struct dispc_device *dispc)
{
- dispc->oldi_io_ctrl =
+ dispc->am65x_oldi_io_ctrl =
syscon_regmap_lookup_by_phandle(dev->of_node,
"ti,am65x-oldi-io-ctrl");
- if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
- dispc->oldi_io_ctrl = NULL;
- } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ if (PTR_ERR(dispc->am65x_oldi_io_ctrl) == -ENODEV) {
+ dispc->am65x_oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->am65x_oldi_io_ctrl)) {
dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
- __func__, PTR_ERR(dispc->oldi_io_ctrl));
- return PTR_ERR(dispc->oldi_io_ctrl);
+ __func__, PTR_ERR(dispc->am65x_oldi_io_ctrl));
+ return PTR_ERR(dispc->am65x_oldi_io_ctrl);
}
return 0;
}
@@ -2898,8 +3035,8 @@ int dispc_init(struct tidss_device *tidss)
if (r)
return r;
- for (i = 0; i < dispc->feat->num_planes; i++) {
- r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ for (i = 0; i < dispc->feat->num_vids; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_info[i].name,
&dispc->base_vid[i]);
if (r)
return r;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 086327d51a90..b8614f62186c 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,13 +46,19 @@ struct dispc_features_scaling {
u32 xinc_max;
};
+struct dispc_vid_info {
+ const char *name; /* Should match dt reg names */
+ u32 hw_id;
+ bool is_lite;
+};
+
struct dispc_errata {
bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
};
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
- DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_OLDI_AM65X, /* OLDI (LVDS) output for AM65x DSS */
DISPC_VP_INTERNAL, /* SoC internal routing */
DISPC_VP_TIED_OFF, /* Tied off / Unavailable */
DISPC_VP_MAX_BUS_TYPE,
@@ -61,6 +67,7 @@ enum dispc_vp_bus_type {
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62L,
DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
@@ -82,18 +89,23 @@ struct dispc_features {
const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
struct tidss_vp_feat vp_feat;
- u32 num_planes;
- const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
- bool vid_lite[TIDSS_MAX_PLANES];
+ u32 num_vids;
+ struct dispc_vid_info vid_info[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
};
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
extern const struct dispc_features dispc_am62a7_feats;
+extern const struct dispc_features dispc_am62l_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
+int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport,
+ u32 oldi_cfg);
+void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport);
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate);
+
void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index e88148e44937..50a3f28250ef 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -226,18 +226,35 @@ enum dispc_common_regs {
#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+/* OLDI Config Bits (DISPC_VP_DSS_OLDI_CFG) */
+#define OLDI_ENABLE BIT(0)
+#define OLDI_MAP (BIT(1) | BIT(2) | BIT(3))
+#define OLDI_SRC BIT(4)
+#define OLDI_CLONE_MODE BIT(5)
+#define OLDI_MASTERSLAVE BIT(6)
+#define OLDI_DEPOL BIT(7)
+#define OLDI_MSB BIT(8)
+#define OLDI_LBEN BIT(9)
+#define OLDI_LBDATA BIT(10)
+#define OLDI_DUALMODESYNC BIT(11)
+#define OLDI_SOFTRST BIT(12)
+#define OLDI_TPATCFG BIT(13)
+
+/* LVDS Format values for OLDI_MAP field in DISPC_VP_DSS_OLDI_CFG register */
+enum oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
/*
* OLDI IO_CTRL register offsets. On AM654 the registers are found
* from CTRL_MMR0, there the syscon regmap should map 0x14 bytes from
* CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL
* register range.
*/
-#define OLDI_DAT0_IO_CTRL 0x00
-#define OLDI_DAT1_IO_CTRL 0x04
-#define OLDI_DAT2_IO_CTRL 0x08
-#define OLDI_DAT3_IO_CTRL 0x0C
-#define OLDI_CLK_IO_CTRL 0x10
+#define AM65X_OLDI_DAT0_IO_CTRL 0x00
+#define AM65X_OLDI_DAT1_IO_CTRL 0x04
+#define AM65X_OLDI_DAT2_IO_CTRL 0x08
+#define AM65X_OLDI_DAT3_IO_CTRL 0x0C
+#define AM65X_OLDI_CLK_IO_CTRL 0x10
-#define OLDI_PWRDN_TX BIT(8)
+#define AM65X_OLDI_PWRDN_TX BIT(8)
#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d4652e8cc28c..a1b12e52aca4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -24,6 +24,7 @@
#include "tidss_drv.h"
#include "tidss_kms.h"
#include "tidss_irq.h"
+#include "tidss_oldi.h"
/* Power management */
@@ -147,6 +148,10 @@ static int tidss_probe(struct platform_device *pdev)
return ret;
}
+ ret = tidss_oldi_init(tidss);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init OLDI\n");
+
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
@@ -203,6 +208,8 @@ err_runtime_suspend:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
+ tidss_oldi_deinit(tidss);
+
return ret;
}
@@ -227,6 +234,8 @@ static void tidss_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
+ tidss_oldi_deinit(tidss);
+
/* devm allocated dispc goes away with the dev so mark it NULL */
dispc_remove(tidss);
@@ -242,6 +251,7 @@ static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
{ .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
+ { .compatible = "ti,am62l-dss", .data = &dispc_am62l_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index 7f4f4282bc04..d14d5d28f0a3 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -11,8 +11,10 @@
#define TIDSS_MAX_PORTS 4
#define TIDSS_MAX_PLANES 4
+#define TIDSS_MAX_OLDI_TXES 2
typedef u32 dispc_irq_t;
+struct tidss_oldi;
struct tidss_device {
struct drm_device ddev; /* DRM device for DSS */
@@ -27,6 +29,9 @@ struct tidss_device {
unsigned int num_planes;
struct drm_plane *planes[TIDSS_MAX_PLANES];
+ unsigned int num_oldis;
+ struct tidss_oldi *oldis[TIDSS_MAX_OLDI_TXES];
+
unsigned int irq;
/* protects the irq masks field and irqenable/irqstatus registers */
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 95b4aeff2775..81a04f767770 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -90,14 +90,18 @@ int tidss_encoder_create(struct tidss_device *tidss,
struct drm_connector *connector;
int ret;
- t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder,
- encoder, encoder_type);
+ t_enc = devm_drm_bridge_alloc(tidss->dev, struct tidss_encoder,
+ bridge, &tidss_bridge_funcs);
if (IS_ERR(t_enc))
return PTR_ERR(t_enc);
+ ret = drm_simple_encoder_init(&tidss->ddev, &t_enc->encoder,
+ encoder_type);
+ if (ret)
+ return ret;
+
t_enc->tidss = tidss;
t_enc->next_bridge = next_bridge;
- t_enc->bridge.funcs = &tidss_bridge_funcs;
enc = &t_enc->encoder;
enc->possible_crtcs = possible_crtcs;
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index f371518f8697..c34eb90cddbe 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -115,7 +115,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
const struct dispc_features *feat = tidss->feat;
u32 max_vps = feat->num_vps;
- u32 max_planes = feat->num_planes;
+ u32 max_planes = feat->num_vids;
struct pipe pipes[TIDSS_MAX_PORTS];
u32 num_pipes = 0;
@@ -144,7 +144,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
dev_dbg(dev, "Setting up panel for port %d\n", i);
switch (feat->vp_bus_type[i]) {
- case DISPC_VP_OLDI:
+ case DISPC_VP_OLDI_AM65X:
enc_type = DRM_MODE_ENCODER_LVDS;
conn_type = DRM_MODE_CONNECTOR_LVDS;
break;
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c
new file mode 100644
index 000000000000..8f25159d0666
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_oldi.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 - Texas Instruments Incorporated
+ *
+ * Aradhya Bhatia <a-bhatia1@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/mfd/syscon.h>
+#include <linux/media-bus-format.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+
+#include "tidss_dispc.h"
+#include "tidss_dispc_regs.h"
+#include "tidss_oldi.h"
+
+struct tidss_oldi {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+
+ enum tidss_oldi_link_type link_type;
+ const struct oldi_bus_format *bus_format;
+ u32 oldi_instance;
+ int companion_instance; /* -1 when OLDI TX operates in Single-Link */
+ u32 parent_vp;
+
+ struct clk *serial;
+ struct regmap *io_ctrl;
+};
+
+struct oldi_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ enum oldi_mode_reg_val oldi_mode_reg_val;
+ u32 input_bus_fmt;
+};
+
+static const struct oldi_bus_format oldi_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, SPWG_18, MEDIA_BUS_FMT_RGB666_1X18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, SPWG_24, MEDIA_BUS_FMT_RGB888_1X24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, JEIDA_24, MEDIA_BUS_FMT_RGB888_1X24 },
+};
+
+#define OLDI_IDLE_CLK_HZ 25000000 /* 25 MHz */
+
+static inline struct tidss_oldi *
+drm_bridge_to_tidss_oldi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tidss_oldi, bridge);
+}
+
+static int tidss_oldi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+
+ if (!oldi->next_bridge) {
+ dev_err(oldi->dev,
+ "%s: OLDI%u Failure attach next bridge\n",
+ __func__, oldi->oldi_instance);
+ return -ENODEV;
+ }
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ dev_err(oldi->dev,
+ "%s: OLDI%u DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory.\n",
+ __func__, oldi->oldi_instance);
+ return -EINVAL;
+ }
+
+ return drm_bridge_attach(encoder, oldi->next_bridge, bridge, flags);
+}
+
+static int
+tidss_oldi_set_serial_clk(struct tidss_oldi *oldi, unsigned long rate)
+{
+ unsigned long new_rate;
+ int ret;
+
+ ret = clk_set_rate(oldi->serial, rate);
+ if (ret) {
+ dev_err(oldi->dev,
+ "OLDI%u: failed to set serial clk rate to %lu Hz\n",
+ oldi->oldi_instance, rate);
+ return ret;
+ }
+
+ new_rate = clk_get_rate(oldi->serial);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(oldi->dev,
+ "OLDI%u Clock rate %lu differs over 5%% from requested %lu\n",
+ oldi->oldi_instance, new_rate, rate);
+
+ dev_dbg(oldi->dev, "OLDI%u: new rate %lu Hz (requested %lu Hz)\n",
+ oldi->oldi_instance, clk_get_rate(oldi->serial), rate);
+
+ return 0;
+}
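+
+/*
+ * Worked example (illustrative only): OLDI serializes seven bits per lane
+ * per pixel clock, so the serial clock runs at 7x the pixel clock. For a
+ * 74.25 MHz pixel clock (mode->clock == 74250, in kHz), the caller below
+ * requests 74250 * 7 * 1000 = 519750000 Hz, i.e. 519.75 MHz.
+ */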
+
+static void tidss_oldi_tx_power(struct tidss_oldi *oldi, bool enable)
+{
+ u32 mask;
+
+ /*
+ * The power control bits are active low and are set to 1 by default,
+ * i.e. the OLDI TXes remain powered off. To power on an OLDI TX, its
+ * bit must be cleared to 0. Since there are cases where not all OLDI
+ * TXes are being used, the power logic selectively powers them on.
+ * Setting 'mask' to the appropriate bits makes sure that the
+ * undesired OLDI TXes remain powered off.
+ */
+
+ if (enable) {
+ switch (oldi->link_type) {
+ case OLDI_MODE_SINGLE_LINK:
+ /* Power on only the required OLDI TX's IO */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) | OLDI_PWRDN_BG;
+ break;
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ case OLDI_MODE_DUAL_LINK:
+ /* Power on both the OLDI TXes' IOs */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) |
+ OLDI_PWRDOWN_TX(oldi->companion_instance) |
+ OLDI_PWRDN_BG;
+ break;
+ default:
+ /*
+ * Execution should never reach here, as an OLDI with an
+ * unsupported OLDI mode would never get registered in the
+ * first place.
+ * However, power off the OLDI in concern just in case.
+ */
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance);
+ enable = false;
+ break;
+ }
+ } else {
+ switch (oldi->link_type) {
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ case OLDI_MODE_DUAL_LINK:
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) |
+ OLDI_PWRDOWN_TX(oldi->companion_instance) |
+ OLDI_PWRDN_BG;
+ break;
+ case OLDI_MODE_SINGLE_LINK:
+ default:
+ mask = OLDI_PWRDOWN_TX(oldi->oldi_instance);
+ break;
+ }
+ }
+
+ regmap_update_bits(oldi->io_ctrl, OLDI_PD_CTRL, mask, enable ? 0 : mask);
+}
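+
+/*
+ * Example (illustrative only): powering on OLDI TX 0 in dual-link mode with
+ * companion TX 1 yields mask = OLDI_PWRDOWN_TX(0) | OLDI_PWRDOWN_TX(1) |
+ * OLDI_PWRDN_BG = 0x103, and the active-low power-down bits are cleared
+ * with regmap_update_bits(oldi->io_ctrl, OLDI_PD_CTRL, 0x103, 0).
+ */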
+
+static int tidss_oldi_config(struct tidss_oldi *oldi)
+{
+ const struct oldi_bus_format *bus_fmt = NULL;
+ u32 oldi_cfg = 0;
+ int ret;
+
+ bus_fmt = oldi->bus_format;
+
+ /*
+ * MASTERSLAVE and SRC bits of OLDI Config are always set to 0.
+ */
+
+ if (bus_fmt->data_width == 24)
+ oldi_cfg |= OLDI_MSB;
+ else if (bus_fmt->data_width != 18)
+ dev_warn(oldi->dev,
+ "OLDI%u: DSS port width %d not supported\n",
+ oldi->oldi_instance, bus_fmt->data_width);
+
+ oldi_cfg |= OLDI_DEPOL;
+
+ oldi_cfg = (oldi_cfg & (~OLDI_MAP)) | (bus_fmt->oldi_mode_reg_val << 1);
+
+ oldi_cfg |= OLDI_SOFTRST;
+
+ oldi_cfg |= OLDI_ENABLE;
+
+ switch (oldi->link_type) {
+ case OLDI_MODE_SINGLE_LINK:
+ /* All configuration is done for this mode. */
+ break;
+
+ case OLDI_MODE_CLONE_SINGLE_LINK:
+ oldi_cfg |= OLDI_CLONE_MODE;
+ break;
+
+ case OLDI_MODE_DUAL_LINK:
+ /* data-mapping field also indicates dual-link mode */
+ oldi_cfg |= BIT(3);
+ oldi_cfg |= OLDI_DUALMODESYNC;
+ break;
+
+ default:
+ dev_err(oldi->dev, "OLDI%u: Unsupported mode.\n",
+ oldi->oldi_instance);
+ return -EINVAL;
+ }
+
+ ret = tidss_configure_oldi(oldi->tidss, oldi->parent_vp, oldi_cfg);
+ if (ret == -ETIMEDOUT)
+ dev_warn(oldi->dev, "OLDI%u: timeout waiting for OLDI reset done.\n",
+ oldi->oldi_instance);
+
+ return ret;
+}
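+
+/*
+ * Example (illustrative only): for MEDIA_BUS_FMT_RGB888_1X7X4_SPWG in
+ * single-link mode, the function above builds
+ *   oldi_cfg = OLDI_MSB | OLDI_DEPOL | (SPWG_24 << 1) | OLDI_SOFTRST |
+ *              OLDI_ENABLE;
+ * i.e. 0x1185, which tidss_configure_oldi() then writes to
+ * DISPC_VP_DSS_OLDI_CFG.
+ */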
+
+static void tidss_oldi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+
+ if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK)
+ return;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ /* Configure the OLDI params */
+ tidss_oldi_config(oldi);
+
+ /* Set the OLDI serial clock (7 times the pixel clock) */
+ tidss_oldi_set_serial_clk(oldi, mode->clock * 7 * 1000);
+
+ /* Enable OLDI IO power */
+ tidss_oldi_tx_power(oldi, true);
+}
+
+static void tidss_oldi_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+
+ if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK)
+ return;
+
+ /* Disable OLDI IO power */
+ tidss_oldi_tx_power(oldi, false);
+
+ /* Set the OLDI serial clock to IDLE Frequency */
+ tidss_oldi_set_serial_clk(oldi, OLDI_IDLE_CLK_HZ);
+
+ /* Clear OLDI Config */
+ tidss_disable_oldi(oldi->tidss, oldi->parent_vp);
+}
+
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *tidss_oldi_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ u32 *input_fmts;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0; i < ARRAY_SIZE(oldi_bus_formats); i++)
+ if (oldi_bus_formats[i].bus_fmt == output_fmt)
+ break;
+
+ if (i == ARRAY_SIZE(oldi_bus_formats))
+ return NULL;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = 1;
+ input_fmts[0] = oldi_bus_formats[i].input_bus_fmt;
+ oldi->bus_format = &oldi_bus_formats[i];
+
+ return input_fmts;
+}
+
+static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = {
+ .attach = tidss_oldi_bridge_attach,
+ .atomic_pre_enable = tidss_oldi_atomic_pre_enable,
+ .atomic_post_disable = tidss_oldi_atomic_post_disable,
+ .atomic_get_input_bus_fmts = tidss_oldi_atomic_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int get_oldi_mode(struct device_node *oldi_tx, int *companion_instance)
+{
+ struct device_node *companion;
+ struct device_node *port0, *port1;
+ u32 companion_reg;
+ bool secondary_oldi = false;
+ int pixel_order;
+
+ /*
+ * Find out whether the OLDI is paired with another OLDI for
+ * combined operation (dual-link or clone).
+ */
+ companion = of_parse_phandle(oldi_tx, "ti,companion-oldi", 0);
+ if (!companion)
+ /*
+ * The OLDI TX does not have a companion, nor is it a
+ * secondary OLDI. It will operate independently.
+ */
+ return OLDI_MODE_SINGLE_LINK;
+
+ if (of_property_read_u32(companion, "reg", &companion_reg))
+ return OLDI_MODE_UNSUPPORTED;
+
+ if (companion_reg > (TIDSS_MAX_OLDI_TXES - 1))
+ /* Invalid companion OLDI reg value. */
+ return OLDI_MODE_UNSUPPORTED;
+
+ *companion_instance = (int)companion_reg;
+
+ if (of_property_read_bool(oldi_tx, "ti,secondary-oldi"))
+ secondary_oldi = true;
+
+ /*
+ * We need to work out whether the sink expects us to function in
+ * dual-link mode. We do this by looking at the DT port nodes that
+ * the OLDI TX ports are connected to. If they are marked as
+ * expecting even pixels and odd pixels, we need to enable dual-link.
+ */
+ port0 = of_graph_get_port_by_id(oldi_tx, 1);
+ port1 = of_graph_get_port_by_id(companion, 1);
+ pixel_order = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
+ of_node_put(port0);
+ of_node_put(port1);
+ of_node_put(companion);
+
+ switch (pixel_order) {
+ case -EINVAL:
+ /*
+ * The dual-link properties were not found in at least
+ * one of the sink nodes. Since 2 OLDI ports are present
+ * in the DT, it can be safely assumed that the required
+ * configuration is Clone Mode.
+ */
+ return (secondary_oldi ? OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK :
+ OLDI_MODE_CLONE_SINGLE_LINK);
+
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ /*
+ * Primary OLDI can only support "ODD" pixels. So, from its
+ * perspective, the pixel order has to be ODD-EVEN.
+ */
+ return (secondary_oldi ? OLDI_MODE_UNSUPPORTED :
+ OLDI_MODE_DUAL_LINK);
+
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ /*
+ * Secondary OLDI can only support "EVEN" pixels. So, from its
+ * perspective, the pixel order has to be EVEN-ODD.
+ */
+ return (secondary_oldi ? OLDI_MODE_SECONDARY_DUAL_LINK :
+ OLDI_MODE_UNSUPPORTED);
+
+ default:
+ return OLDI_MODE_UNSUPPORTED;
+ }
+}
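+
+/*
+ * Illustrative DT sketch (hypothetical node and property layout, not a
+ * binding reference) for the dual-link case handled above:
+ *
+ *   oldi0: oldi@0 {
+ *           reg = <0>;
+ *           ti,companion-oldi = <&oldi1>;
+ *           port@1 { dual-lvds-odd-pixels; };
+ *   };
+ *   oldi1: oldi@1 {
+ *           reg = <1>;
+ *           ti,companion-oldi = <&oldi0>;
+ *           ti,secondary-oldi;
+ *           port@1 { dual-lvds-even-pixels; };
+ *   };
+ *
+ * From oldi0's perspective drm_of_lvds_get_dual_link_pixel_order() reports
+ * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS (OLDI_MODE_DUAL_LINK); from oldi1's,
+ * EVEN_ODD (OLDI_MODE_SECONDARY_DUAL_LINK).
+ */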
+
+static int get_parent_dss_vp(struct device_node *oldi_tx, u32 *parent_vp)
+{
+ struct device_node *ep, *dss_port;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(oldi_tx, OLDI_INPUT_PORT, -1);
+ if (ep) {
+ dss_port = of_graph_get_remote_port(ep);
+ if (!dss_port) {
+ ret = -ENODEV;
+ goto err_return_ep_port;
+ }
+
+ ret = of_property_read_u32(dss_port, "reg", parent_vp);
+
+ of_node_put(dss_port);
+err_return_ep_port:
+ of_node_put(ep);
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static const struct drm_bridge_timings default_tidss_oldi_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+void tidss_oldi_deinit(struct tidss_device *tidss)
+{
+ for (int i = 0; i < tidss->num_oldis; i++) {
+ if (tidss->oldis[i]) {
+ drm_bridge_remove(&tidss->oldis[i]->bridge);
+ tidss->oldis[i] = NULL;
+ }
+ }
+}
+
+int tidss_oldi_init(struct tidss_device *tidss)
+{
+ struct tidss_oldi *oldi;
+ struct device_node *child;
+ struct drm_bridge *bridge;
+ u32 parent_vp, oldi_instance;
+ int companion_instance = -1;
+ enum tidss_oldi_link_type link_type = OLDI_MODE_UNSUPPORTED;
+ struct device_node *oldi_parent;
+ int ret = 0;
+
+ tidss->num_oldis = 0;
+
+ oldi_parent = of_get_child_by_name(tidss->dev->of_node, "oldi-transmitters");
+ if (!oldi_parent)
+ /* Return gracefully */
+ return 0;
+
+ for_each_available_child_of_node(oldi_parent, child) {
+ ret = get_parent_dss_vp(child, &parent_vp);
+ if (ret) {
+ if (ret == -ENODEV) {
+ /*
+ * ENODEV means that this particular OLDI node
+ * is not connected to the DSS, which is
+ * harmless. Another OLDI may still be
+ * connected; continue searching for it.
+ */
+ ret = 0;
+ continue;
+ }
+ goto err_put_node;
+ }
+
+ ret = of_property_read_u32(child, "reg", &oldi_instance);
+ if (ret)
+ goto err_put_node;
+
+ /*
+ * Now that the OLDI is confirmed to be connected to the DSS,
+ * continue with finding the OLDI sink and the other OLDI
+ * properties.
+ */
+ bridge = devm_drm_of_get_bridge(tidss->dev, child,
+ OLDI_OUTPUT_PORT, 0);
+ if (IS_ERR(bridge)) {
+ /*
+ * Either there was no OLDI sink in the devicetree, or
+ * the OLDI sink has not been added yet. In any case,
+ * return.
+ * We don't want an OLDI node that is connected to the DSS
+ * but not to any sink.
+ */
+ ret = dev_err_probe(tidss->dev, PTR_ERR(bridge),
+ "no panel/bridge for OLDI%u.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+
+ link_type = get_oldi_mode(child, &companion_instance);
+ if (link_type == OLDI_MODE_UNSUPPORTED) {
+ ret = dev_err_probe(tidss->dev, -EINVAL,
+ "OLDI%u: Unsupported OLDI connection.\n",
+ oldi_instance);
+ goto err_put_node;
+ } else if ((link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) ||
+ (link_type == OLDI_MODE_CLONE_SINGLE_LINK)) {
+ /*
+ * The OLDI driver cannot support OLDI clone mode
+ * properly at present.
+ * Clone mode requires two working encoder-bridge
+ * pipelines fed by the same CRTC, which the DRM
+ * framework does not support at present. If there
+ * were, say, two OLDI sink bridges, each connected
+ * to an OLDI TX, they could not both be supported
+ * simultaneously.
+ * This driver still carries some code for
+ * configuring OLDI clone mode in the DSS hardware,
+ * for the future when the DRM framework has better
+ * infrastructure to support two encoder-bridge
+ * pipelines simultaneously.
+ * Until then, the driver errors out if it detects
+ * a clone-mode configuration.
+ */
+ ret = dev_err_probe(tidss->dev, -EOPNOTSUPP,
+ "The OLDI driver does not support Clone Mode at present.\n");
+ goto err_put_node;
+ } else if (link_type == OLDI_MODE_SECONDARY_DUAL_LINK) {
+ /*
+ * This is the secondary OLDI node, which serves as a
+ * companion to the primary OLDI when configured for
+ * dual-link mode. Since the primary OLDI will be part
+ * of the bridge chain, there is no need to add this
+ * one too. Continue on to the next OLDI node.
+ */
+ continue;
+ }
+
+ oldi = devm_drm_bridge_alloc(tidss->dev, struct tidss_oldi, bridge,
+ &tidss_oldi_bridge_funcs);
+ if (IS_ERR(oldi)) {
+ ret = PTR_ERR(oldi);
+ goto err_put_node;
+ }
+
+ oldi->parent_vp = parent_vp;
+ oldi->oldi_instance = oldi_instance;
+ oldi->companion_instance = companion_instance;
+ oldi->link_type = link_type;
+ oldi->dev = tidss->dev;
+ oldi->next_bridge = bridge;
+
+ /*
+ * Only the primary OLDI needs to reference the io-ctrl system
+ * registers and the serial clock.
+ * No check is needed for a secondary OLDI in dual-link mode,
+ * because the driver never creates a drm_bridge instance for it.
+ * The driver will, however, need to create a drm_bridge instance
+ * for a secondary OLDI in clone mode (once it is supported).
+ */
+ if (link_type != OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) {
+ oldi->io_ctrl = syscon_regmap_lookup_by_phandle(child,
+ "ti,oldi-io-ctrl");
+ if (IS_ERR(oldi->io_ctrl)) {
+ ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->io_ctrl),
+ "OLDI%u: syscon_regmap_lookup_by_phandle failed.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+
+ oldi->serial = of_clk_get_by_name(child, "serial");
+ if (IS_ERR(oldi->serial)) {
+ ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->serial),
+ "OLDI%u: Failed to get serial clock.\n",
+ oldi_instance);
+ goto err_put_node;
+ }
+ }
+
+ /* Register the bridge. */
+ oldi->bridge.of_node = child;
+ oldi->bridge.driver_private = oldi;
+ oldi->bridge.timings = &default_tidss_oldi_timings;
+
+ tidss->oldis[tidss->num_oldis++] = oldi;
+ oldi->tidss = tidss;
+
+ drm_bridge_add(&oldi->bridge);
+ }
+
+ of_node_put(child);
+ of_node_put(oldi_parent);
+
+ return 0;
+
+err_put_node:
+ of_node_put(child);
+ of_node_put(oldi_parent);
+ return ret;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.h b/drivers/gpu/drm/tidss/tidss_oldi.h
new file mode 100644
index 000000000000..8cd535c5ee65
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_oldi.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 - Texas Instruments Incorporated
+ *
+ * Aradhya Bhatia <a-bhatia1@ti.com>
+ */
+
+#ifndef __TIDSS_OLDI_H__
+#define __TIDSS_OLDI_H__
+
+#include "tidss_drv.h"
+
+struct tidss_oldi;
+
+/* OLDI PORTS */
+#define OLDI_INPUT_PORT 0
+#define OLDI_OUTPUT_PORT 1
+
+/* Control MMR Registers */
+
+/* Register offsets */
+#define OLDI_PD_CTRL 0x100
+#define OLDI_LB_CTRL 0x104
+
+/* Power control bits */
+#define OLDI_PWRDOWN_TX(n) BIT(n)
+
+/* LVDS Bandgap reference Enable/Disable */
+#define OLDI_PWRDN_BG BIT(8)
+
+enum tidss_oldi_link_type {
+ OLDI_MODE_UNSUPPORTED,
+ OLDI_MODE_SINGLE_LINK,
+ OLDI_MODE_CLONE_SINGLE_LINK,
+ OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK,
+ OLDI_MODE_DUAL_LINK,
+ OLDI_MODE_SECONDARY_DUAL_LINK,
+};
+
+int tidss_oldi_init(struct tidss_device *tidss);
+void tidss_oldi_deinit(struct tidss_device *tidss);
+
+#endif /* __TIDSS_OLDI_H__ */
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 719412e6c346..142ae81951a0 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -200,7 +200,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
struct tidss_plane *tplane;
enum drm_plane_type type;
u32 possible_crtcs;
- u32 num_planes = tidss->feat->num_planes;
+ u32 num_planes = tidss->feat->num_vids;
u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709));
u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 8706763af8fb..8d3b7c4fa6a4 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
+#include <drm/drm_panic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -469,10 +470,28 @@ static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane,
bochs_hw_setformat(bochs, fb->format);
}
+static int bochs_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct bochs_device *bochs = to_bochs_device(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
+ }
+ return -ENODEV;
+}
+
static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = bochs_primary_plane_helper_atomic_check,
.atomic_update = bochs_primary_plane_helper_atomic_update,
+ .get_scanout_buffer = bochs_primary_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs bochs_primary_plane_funcs = {
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 3148f5d3dbd6..1bcc67977f48 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -542,14 +542,15 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
bo->ttm = old_tt;
}
- err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_ASSERT_EQ(test, man->usage, size);
-
placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, placement);
ttm_bo_reserve(bo, false, false, NULL);
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, size);
+
err = ttm_bo_validate(bo, placement, &ctx);
ttm_bo_unreserve(bo);
@@ -757,56 +758,6 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
-static void ttm_bo_validate_swapout(struct kunit *test)
-{
- unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE);
- enum ttm_bo_type bo_type = ttm_bo_type_device;
- struct ttm_buffer_object *bo_small, *bo_big;
- struct ttm_test_devices *priv = test->priv;
- struct ttm_operation_ctx ctx = { };
- struct ttm_placement *placement;
- u32 mem_type = TTM_PL_TT;
- struct ttm_place *place;
- struct sysinfo si;
- int err;
-
- si_meminfo(&si);
- size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE);
-
- ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size);
-
- place = ttm_place_kunit_init(test, mem_type, 0);
- placement = ttm_placement_kunit_init(test, place, 1);
-
- bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, bo_small);
-
- drm_gem_private_object_init(priv->drm, &bo_small->base, size);
-
- err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement,
- PAGE_SIZE, &ctx, NULL, NULL,
- &dummy_ttm_bo_destroy);
- KUNIT_EXPECT_EQ(test, err, 0);
- dma_resv_unlock(bo_small->base.resv);
-
- bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL);
-
- dma_resv_lock(bo_big->base.resv, NULL);
- err = ttm_bo_validate(bo_big, placement, &ctx);
- dma_resv_unlock(bo_big->base.resv);
-
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_EXPECT_NOT_NULL(test, bo_big->resource);
- KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type);
- KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM);
- KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED);
-
- ttm_bo_put(bo_big);
- ttm_bo_put(bo_small);
-
- ttm_mock_manager_fini(priv->ttm_dev, mem_type);
-}
-
static void ttm_bo_validate_happy_evict(struct kunit *test)
{
u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
@@ -1201,7 +1152,6 @@ static struct kunit_case ttm_bo_validate_test_cases[] = {
KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
ttm_bo_validate_wait_gen_params),
- KUNIT_CASE(ttm_bo_validate_swapout),
KUNIT_CASE(ttm_bo_validate_happy_evict),
KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index b91c13f46225..7aaf0d1395ff 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
index f6d1c8a2845d..d7eb6471f2ed 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index d27691f2e451..fca0a1a3c6fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index ffaab68bd5dd..cb1b8e5dadf5 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -4,6 +4,8 @@
*/
#include <drm/ttm/ttm_backup.h>
+
+#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 08a23ab037cb..f4d9e68b21e7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -35,6 +35,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -46,6 +47,7 @@
#include <linux/dma-resv.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
@@ -524,11 +526,11 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
return 0;
if (bo->deleted) {
- lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
if (!lret)
ttm_bo_cleanup_memtype_use(bo);
} else {
- lret = ttm_bo_evict(bo, walk->ctx);
+ lret = ttm_bo_evict(bo, walk->arg.ctx);
}
if (lret)
@@ -564,8 +566,10 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
struct ttm_bo_evict_walk evict_walk = {
.walk = {
.ops = &ttm_evict_walk_ops,
- .ctx = ctx,
- .ticket = ticket,
+ .arg = {
+ .ctx = ctx,
+ .ticket = ticket,
+ }
},
.place = place,
.evictor = evictor,
@@ -574,7 +578,7 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
};
s64 lret;
- evict_walk.walk.trylock_only = true;
+ evict_walk.walk.arg.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
/* One more attempt if we hit low limit? */
@@ -588,12 +592,12 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
/* Reset low limit */
evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
- evict_walk.walk.trylock_only = false;
+ evict_walk.walk.arg.trylock_only = false;
retry:
do {
/* The walk may clear the evict_walk.walk.ticket field */
- evict_walk.walk.ticket = ticket;
+ evict_walk.walk.arg.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
@@ -1104,7 +1108,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct ttm_bo_swapout_walk *swapout_walk =
container_of(walk, typeof(*swapout_walk), walk);
- struct ttm_operation_ctx *ctx = walk->ctx;
+ struct ttm_operation_ctx *ctx = walk->arg.ctx;
s64 ret;
/*
@@ -1215,8 +1219,10 @@ s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
struct ttm_bo_swapout_walk swapout_walk = {
.walk = {
.ops = &ttm_swap_ops,
- .ctx = ctx,
- .trylock_only = true,
+ .arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ },
},
.gfp_flags = gfp_flags,
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
new file mode 100644
index 000000000000..9d8b747a34db
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#ifndef _TTM_BO_INTERNAL_H_
+#define _TTM_BO_INTERNAL_H_
+
+#include <drm/ttm/ttm_bo.h>
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object may be
+ * removed from the lookup structure by the destructor, and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+#endif
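
For context, a minimal sketch of the lookup pattern ttm_bo_get_unless_zero() enables, assuming a hypothetical RCU-protected handle table (my_bo_lookup(), my_table and my_table_find() are illustrative names, not part of this patch):

	/* Hypothetical lookup: only take a reference if the bo is still alive. */
	static struct ttm_buffer_object *my_bo_lookup(struct my_table *table, u32 handle)
	{
		struct ttm_buffer_object *bo;

		rcu_read_lock();
		bo = my_table_find(table, handle);	/* illustrative helper */
		if (bo)
			bo = ttm_bo_get_unless_zero(bo);	/* NULL if refcount already hit zero */
		rcu_read_unlock();

		return bo;	/* caller drops the reference with ttm_bo_put() */
	}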
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bd90404ea609..acbbca9d5c92 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,6 +28,8 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+
+#include <linux/export.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
@@ -37,6 +39,8 @@
#include <drm/drm_cache.h>
+#include "ttm_bo_internal.h"
+
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
@@ -379,6 +383,32 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
}
/**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, and only after making
+ * sure the bo is the one being displayed, so that it is properly allocated
+ * and protected.
+ *
+ * Returns the vaddr that you can use to write to the bo, which you should
+ * pass to kunmap_local() when you're done with this page, or NULL if the bo
+ * is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
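
A hedged sketch of how a panic handler could use this helper, assuming the caller has already established that the bo is the buffer being scanned out (all names below are illustrative):

	/* Illustrative only: poke one byte into the displayed bo from panic context. */
	static void my_panic_plot(struct ttm_buffer_object *bo, unsigned long page,
				  unsigned int offset, u8 value)
	{
		u8 *vaddr = ttm_bo_kmap_try_from_panic(bo, page);

		if (!vaddr)
			return;	/* iomem or unpopulated page: nothing to draw on */

		vaddr[offset] = value;
		kunmap_local(vaddr);
	}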
+
+/**
* ttm_bo_kmap
*
* @bo: The buffer object.
@@ -770,14 +800,15 @@ error_destroy_tt:
return ret;
}
-static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
- struct ttm_buffer_object *bo,
- bool *needs_unlock)
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
{
- *needs_unlock = false;
+ struct ttm_operation_ctx *ctx = curs->arg->ctx;
+
+ curs->needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
- *needs_unlock = true;
+ curs->needs_unlock = true;
return true;
}
@@ -789,27 +820,27 @@ static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
return false;
}
-static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
- struct ttm_buffer_object *bo,
- bool *needs_unlock)
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
{
+ struct ttm_lru_walk_arg *arg = curs->arg;
struct dma_resv *resv = bo->base.resv;
int ret;
- if (walk->ctx->interruptible)
- ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
else
- ret = dma_resv_lock(resv, walk->ticket);
+ ret = dma_resv_lock(resv, arg->ticket);
if (!ret) {
- *needs_unlock = true;
+ curs->needs_unlock = true;
/*
* Only a single ticketlock per loop. Ticketlocks are prone
* to return -EDEADLK causing the eviction to fail, so
* after waiting for the ticketlock, revert back to
* trylocking for this walk.
*/
- walk->ticket = NULL;
+ arg->ticket = NULL;
} else if (ret == -EDEADLK) {
/* Caller needs to exit the ww transaction. */
ret = -ENOSPC;
@@ -818,12 +849,6 @@ static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
return ret;
}
-static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
-{
- if (locked)
- dma_resv_unlock(bo->base.resv);
-}
-
/**
* ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
* valid items.
@@ -858,64 +883,21 @@ static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target)
{
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
+ struct ttm_bo_lru_cursor cursor;
+ struct ttm_buffer_object *bo;
s64 progress = 0;
s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_cursor_init(&cursor, man);
- ttm_resource_manager_for_each_res(&cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- bool bo_needs_unlock = false;
- bool bo_locked = false;
- int mem_type;
-
- /*
- * Attempt a trylock before taking a reference on the bo,
- * since if we do it the other way around, and the trylock fails,
- * we need to drop the lru lock to put the bo.
- */
- if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
- bo_locked = true;
- else if (!walk->ticket || walk->ctx->no_wait_gpu ||
- walk->trylock_only)
- continue;
-
- if (!ttm_bo_get_unless_zero(bo)) {
- ttm_lru_walk_unlock(bo, bo_needs_unlock);
- continue;
- }
-
- mem_type = res->mem_type;
- spin_unlock(&bdev->lru_lock);
-
- lret = 0;
- if (!bo_locked)
- lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
-
- /*
- * Note that in between the release of the lru lock and the
- * ticketlock, the bo may have switched resource,
- * and also memory type, since the resource may have been
- * freed and allocated again with a different memory type.
- * In that case, just skip it.
- */
- if (!lret && bo->resource && bo->resource->mem_type == mem_type)
- lret = walk->ops->process_bo(walk, bo);
-
- ttm_lru_walk_unlock(bo, bo_needs_unlock);
- ttm_bo_put(bo);
+ ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+ lret = walk->ops->process_bo(walk, bo);
if (lret == -EBUSY || lret == -EALREADY)
lret = 0;
progress = (lret < 0) ? lret : progress + lret;
-
- spin_lock(&bdev->lru_lock);
if (progress < 0 || progress >= target)
break;
}
- ttm_resource_cursor_fini(&cursor);
- spin_unlock(&bdev->lru_lock);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
return progress;
}
@@ -953,44 +935,87 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
* ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
* @curs: The ttm_bo_lru_cursor to initialize.
* @man: The ttm resource_manager whose LRU lists to iterate over.
- * @ctx: The ttm_operation_ctx to govern the locking.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
*
- * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
- * or prelocked buffer objects are available as detailed by
- * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
- * supported.
+ * Initialize a struct ttm_bo_lru_cursor.
*
* Return: Pointer to @curs. The function does not fail.
*/
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx)
+ struct ttm_lru_walk_arg *arg)
{
memset(curs, 0, sizeof(*curs));
ttm_resource_cursor_init(&curs->res_curs, man);
- curs->ctx = ctx;
+ curs->arg = arg;
return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
static struct ttm_buffer_object *
-ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
- struct ttm_buffer_object *bo = res->bo;
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ bool first = !curs->bo;
- if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
- return NULL;
+ ttm_bo_lru_cursor_cleanup_bo(curs);
- if (!ttm_bo_get_unless_zero(bo)) {
- if (curs->needs_unlock)
- dma_resv_unlock(bo->base.resv);
- return NULL;
+ spin_lock(lru_lock);
+ for (;;) {
+ int mem_type, ret = 0;
+ bool bo_locked = false;
+
+ if (first) {
+ res = ttm_resource_manager_first(&curs->res_curs);
+ first = false;
+ } else {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ }
+ if (!res)
+ break;
+
+ bo = res->bo;
+ if (ttm_lru_walk_trylock(curs, bo))
+ bo_locked = true;
+ else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(lru_lock);
+ if (!bo_locked)
+ ret = ttm_lru_walk_ticketlock(curs, bo);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ curs->bo = bo;
+ if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+ return bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ if (ret && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ spin_lock(lru_lock);
}
- curs->bo = bo;
- return bo;
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
}
/**
@@ -1004,25 +1029,7 @@ ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *cur
*/
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
- spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
- struct ttm_resource *res = NULL;
- struct ttm_buffer_object *bo;
-
- ttm_bo_lru_cursor_cleanup_bo(curs);
-
- spin_lock(lru_lock);
- for (;;) {
- res = ttm_resource_manager_next(&curs->res_curs);
- if (!res)
- break;
-
- bo = ttm_bo_from_res_reserved(res, curs);
- if (bo)
- break;
- }
-
- spin_unlock(lru_lock);
- return res ? bo : NULL;
+ return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
@@ -1036,21 +1043,8 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
*/
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
- spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
- struct ttm_buffer_object *bo;
- struct ttm_resource *res;
-
- spin_lock(lru_lock);
- res = ttm_resource_manager_first(&curs->res_curs);
- if (!res) {
- spin_unlock(lru_lock);
- return NULL;
- }
-
- bo = ttm_bo_from_res_reserved(res, curs);
- spin_unlock(lru_lock);
-
- return bo ? bo : ttm_bo_lru_cursor_next(curs);
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ return __ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index bdfa6ecfef05..b47020fca199 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,6 +31,8 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/export.h>
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 02e797fd1891..c3e2fcbdd2cc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
@@ -36,6 +37,7 @@
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
/*
* ttm_global_mutex - protecting the global state
@@ -123,6 +125,28 @@ out:
return ret;
}
+/**
+ * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation.
+ *
+ * @bdev: A pointer to a struct ttm_device to prepare hibernation for.
+ *
+ * Return: 0 on success, negative number on failure.
+ */
+int ttm_device_prepare_hibernation(struct ttm_device *bdev)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ };
+ int ret;
+
+ do {
+ ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
+ } while (ret > 0);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_device_prepare_hibernation);
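
A driver would typically call this from its hibernation preparation path; a minimal sketch, assuming a hypothetical driver structure that embeds a struct ttm_device named ttm:

	/* Illustrative PM hook: back all GTT BOs by shmem before the hibernation image is built. */
	static int my_pm_prepare(struct device *dev)
	{
		struct my_device *mdev = dev_get_drvdata(dev);	/* hypothetical driver data */

		return ttm_device_prepare_hibernation(&mdev->ttm);
	}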
+
/*
* A buffer object shrink method that tries to swap out the first
* buffer object on the global::swap_lru list.
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index f1c60fa80c2d..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c2ea865be657..baf27c70a419 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -31,6 +31,7 @@
* cause they are rather slow compared to alloc_pages+map.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -1132,7 +1133,9 @@ void ttm_pool_fini(struct ttm_pool *pool)
}
EXPORT_SYMBOL(ttm_pool_fini);
-/* As long as pages are available make sure to release at least one */
+/* Free roughly an average pool's worth of pages in one batch. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
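
For scale: assuming the common x86-64 configuration where MAX_PAGE_ORDER is 10 and NR_PAGE_ORDERS is 11 (both are config-dependent), this evaluates to (1 << (10 / 2)) * 11 = 32 * 11 = 352 pages, i.e. roughly 1.4 MiB per shrinker batch with 4 KiB pages.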
+
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -1140,9 +1143,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
do
num_freed += ttm_pool_shrink();
- while (!num_freed && atomic_long_read(&allocated_pages));
+ while (num_freed < sc->nr_to_scan &&
+ atomic_long_read(&allocated_pages));
+
+ sc->nr_scanned = num_freed;
- return num_freed;
+ return num_freed ?: SHRINK_STOP;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
@@ -1233,7 +1239,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- if (!pool->use_dma_alloc) {
+ if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
@@ -1242,7 +1248,12 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- seq_puts(m, "DMA ");
+ if (!ttm_pool_select_type(pool, i, 0))
+ continue;
+ if (pool->use_dma_alloc)
+ seq_puts(m, "DMA ");
+ else
+ seq_printf(m, "N%d ", pool->nid);
switch (i) {
case ttm_cached:
seq_puts(m, "\t:");
@@ -1266,10 +1277,15 @@ EXPORT_SYMBOL(ttm_pool_debugfs);
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
- struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+ struct shrink_control sc = {
+ .gfp_mask = GFP_NOFS,
+ .nr_to_scan = TTM_SHRINKER_BATCH,
+ };
+ unsigned long count;
fs_reclaim_acquire(GFP_KERNEL);
- seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
+ count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+ seq_printf(m, "%lu/%lu\n", count,
ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
@@ -1324,6 +1340,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker->count_objects = ttm_pool_shrinker_count;
mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker->batch = TTM_SHRINKER_BATCH;
mm_shrinker->seeks = 1;
shrinker_register(mm_shrinker);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..db854b581d83 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -34,6 +34,8 @@
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 769b0ca9be47..e2c82ad07eb4 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -23,6 +23,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
@@ -557,6 +558,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
cond_resched();
} while (!ret);
+ if (ret && ret != -ENOENT)
+ return ret;
+
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 698cd4bf5e46..506e257dfba8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -33,6 +33,7 @@
#include <linux/cc_platform.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 42df9d3567e7..cb9df8822472 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -745,17 +745,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-}
-
-static void
-v3d_sched_skip_reset(struct drm_sched_job *sched_job)
-{
- struct drm_gpu_scheduler *sched = sched_job->sched;
-
- spin_lock(&sched->job_list_lock);
- list_add(&sched_job->list, &sched->pending_list);
- spin_unlock(&sched->job_list_lock);
+ return DRM_GPU_SCHED_STAT_RESET;
}
static enum drm_gpu_sched_stat
@@ -776,8 +766,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
*timedout_ctca = ctca;
*timedout_ctra = ctra;
- v3d_sched_skip_reset(sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
@@ -822,8 +811,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
- v3d_sched_skip_reset(sched_job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
return v3d_gpu_reset_for_timeout(v3d, sched_job);
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 4ff5de46fb22..5171ffe9012d 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -169,7 +169,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->file = file_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
+ 1, v3d_priv, file_priv->client_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 163d092bd973..07c91b450f93 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -560,6 +560,12 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
+ ret = drm_connector_hdmi_audio_init(connector, dev->dev,
+ &vc4_hdmi_audio_funcs,
+ 8, 0, false, -1);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -2286,7 +2292,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
- &vc4_hdmi_audio_funcs, 8, false,
+ &vc4_hdmi_audio_funcs, 8, 0, false,
-1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f5b167417428..8f983edb81ff 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -530,6 +530,7 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -568,7 +569,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
mode_cmd = &mode_cmd_local;
}
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}
/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2752ab4f1c97..260c64733972 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -32,7 +32,7 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
@@ -52,7 +52,7 @@
static struct vgem_device {
struct drm_device drm;
- struct platform_device *platform;
+ struct faux_device *faux_dev;
} *vgem_device;
static int vgem_open(struct drm_device *dev, struct drm_file *file)
@@ -127,27 +127,27 @@ static const struct drm_driver vgem_driver = {
static int __init vgem_init(void)
{
int ret;
- struct platform_device *pdev;
+ struct faux_device *fdev;
- pdev = platform_device_register_simple("vgem", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create("vgem", NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
- dma_coerce_mask_and_coherent(&pdev->dev,
+ dma_coerce_mask_and_coherent(&fdev->dev,
DMA_BIT_MASK(64));
- vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver,
+ vgem_device = devm_drm_dev_alloc(&fdev->dev, &vgem_driver,
struct vgem_device, drm);
if (IS_ERR(vgem_device)) {
ret = PTR_ERR(vgem_device);
goto out_devres;
}
- vgem_device->platform = pdev;
+ vgem_device->faux_dev = fdev;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
@@ -157,19 +157,19 @@ static int __init vgem_init(void)
return 0;
out_devres:
- devres_release_group(&pdev->dev, NULL);
+ devres_release_group(&fdev->dev, NULL);
out_unregister:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
static void __exit vgem_exit(void)
{
- struct platform_device *pdev = vgem_device->platform;
+ struct faux_device *fdev = vgem_device->faux_dev;
drm_dev_unregister(&vgem_device->drm);
- devres_release_group(&pdev->dev, NULL);
- platform_device_unregister(pdev);
+ devres_release_group(&fdev->dev, NULL);
+ faux_device_destroy(fdev);
}
module_init(vgem_init);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 59a45e74a641..e5805ca646c7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -66,6 +66,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -73,7 +74,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd);
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
@@ -293,6 +294,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj = NULL;
@@ -314,7 +316,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+ ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
index 9ded37b67a46..5750f0bd9d40 100644
--- a/drivers/gpu/drm/vkms/tests/Makefile
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -1,3 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
+vkms-kunit-tests-y := \
+ vkms_config_test.o \
+ vkms_format_test.o
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
new file mode 100644
index 000000000000..2e1daef94831
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+
+#include "../../drm_crtc_internal.h"
+
+#include "../vkms_formats.h"
+
+#define TEST_BUFF_SIZE 50
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+/**
+ * struct pixel_yuv_u8 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ */
+struct pixel_yuv_u8 {
+ u8 y, u, v;
+};
+
+/*
+ * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * conversions in VKMS from YUV to ARGB
+ *
+ * @encoding: Encoding used to convert RGB to YUV
+ * @range: Range used to convert RGB to YUV
+ * @n_colors: Count of test colors in this case
+ * @format_pair.name: Name used for this color conversion, used to
+ * clarify the test results
+ * @format_pair.rgb: RGB color tested
+ * @format_pair.yuv: Same color as @format_pair.rgb, but converted to
+ * YUV using @encoding and @range.
+ */
+struct yuv_u8_to_argb_u16_case {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ size_t n_colors;
+ struct format_pair {
+ char *name;
+ struct pixel_yuv_u8 yuv;
+ struct pixel_argb_u16 argb;
+ } colors[TEST_BUFF_SIZE];
+};
+
+/*
+ * The YUV color representations were acquired via the colour python framework.
+ * Below are the function calls used for generating each case.
+ *
+ * For more information, go to the docs:
+ * https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
+ */
+static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ *
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 full range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 limited range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 full range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 limited range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 full range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 limited range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+};
+
+/*
+ * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion from YUV
+ * colors to ARGB colors in VKMS
+ *
+ * This test will use the functions get_conversion_matrix_to_argb_u16 and
+ * argb_u16_from_yuv888 to convert YUV colors (stored in
+ * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ *
+ * The conversion between YUV and RGB is not totally reversible, so there may be
+ * some difference between the expected value and the result.
+ * In addition, there may be some rounding error as the input color is 8 bits
+ * and the output color is 16 bits.
+ */
+static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+{
+ const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ struct pixel_argb_u16 argb;
+
+ for (size_t i = 0; i < param->n_colors; i++) {
+ const struct format_pair *color = &param->colors[i];
+ struct conversion_matrix matrix;
+
+ get_conversion_matrix_to_argb_u16
+ (DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
+
+ argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
+ "On the A channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.a, argb.a);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.r, color->argb.r), 0x1ff,
+ "On the R channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.r, argb.r);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.g, color->argb.g), 0x1ff,
+ "On the G channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.g, argb.g);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.b, color->argb.b), 0x1ff,
+ "On the B channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.b, argb.b);
+ }
+}
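
On the tolerance used above: 0x1ff is just under two 8-bit quantization steps at 16-bit scale (one step being 0x101 = 257), i.e. roughly ±0.78% per channel, which absorbs the fixed-point rounding and the 8-to-16-bit expansion error without hiding genuine conversion bugs.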
+
+static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
+ drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
+}
+
+KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
+ vkms_format_test_yuv_u8_to_argb_u16_case_desc
+);
+
+static struct kunit_case vkms_format_test_cases[] = {
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ {}
+};
+
+static struct kunit_suite vkms_format_test_suite = {
+ .name = "vkms-format",
+ .test_cases = vkms_format_test_cases,
+};
+
+kunit_test_suite(vkms_format_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms format conversion");
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 8c9898b9055d..e60573e0f3e9 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -302,8 +302,6 @@ struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *pri
vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
if (IS_ERR(vkms_out->composer_workq))
return ERR_CAST(vkms_out->composer_workq);
- if (!vkms_out->composer_workq)
- return ERR_PTR(-ENOMEM);
return vkms_out;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index a24d1655f7b8..e8472d9b6e3b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/dma-mapping.h>
#include <drm/clients/drm_client_setup.h>
@@ -149,27 +149,27 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
static int vkms_create(struct vkms_config *config)
{
int ret;
- struct platform_device *pdev;
+ struct faux_device *fdev;
struct vkms_device *vkms_device;
const char *dev_name;
dev_name = vkms_config_get_device_name(config);
- pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create(dev_name, NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
- vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver,
+ vkms_device = devm_drm_dev_alloc(&fdev->dev, &vkms_driver,
struct vkms_device, drm);
if (IS_ERR(vkms_device)) {
ret = PTR_ERR(vkms_device);
goto out_devres;
}
- vkms_device->platform = pdev;
+ vkms_device->faux_dev = fdev;
vkms_device->config = config;
config->dev = vkms_device;
@@ -203,9 +203,9 @@ static int vkms_create(struct vkms_config *config)
return 0;
out_devres:
- devres_release_group(&pdev->dev, NULL);
+ devres_release_group(&fdev->dev, NULL);
out_unregister:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
@@ -231,19 +231,19 @@ static int __init vkms_init(void)
static void vkms_destroy(struct vkms_config *config)
{
- struct platform_device *pdev;
+ struct faux_device *fdev;
if (!config->dev) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
- pdev = config->dev->platform;
+ fdev = config->dev->faux_dev;
drm_dev_unregister(&config->dev->drm);
drm_atomic_helper_shutdown(&config->dev->drm);
- devres_release_group(&pdev->dev, NULL);
- platform_device_unregister(pdev);
+ devres_release_group(&fdev->dev, NULL);
+ faux_device_destroy(fdev);
config->dev = NULL;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index a74a7fc3a056..8013c31efe3b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -45,6 +45,23 @@ struct vkms_frame_info {
unsigned int rotation;
};
+/**
+ * struct pixel_argb_u16 - Internal representation of a pixel color.
+ * @a: Alpha component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @r: Red component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @g: Green component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @b: Blue component value, stored in 16 bits, without padding, using
+ * machine endianness
+ *
+ * The goal of this structure is to keep enough precision to ensure
+ * correct composition results in VKMS and to simplify color
+ * manipulation by splitting each component into its own field.
+ * Caution: the byte ordering of this structure is machine-dependent,
+ * so you can't cast it directly to AR48 or xR48.
+ */
struct pixel_argb_u16 {
u16 a, r, g, b;
};
@@ -103,16 +120,34 @@ typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_st
struct pixel_argb_u16 out_pixel[]);
/**
+ * struct conversion_matrix - Matrix to use for a specific encoding and range
+ *
+ * @matrix: Conversion matrix from YUV to RGB. The matrix is stored in a row-major manner and is
+ * used to compute RGB values from YUV values:
+ * [[r],[g],[b]] = @matrix * [[y],[u],[v]]
+ * OR for YVU formats:
+ * [[r],[g],[b]] = @matrix * [[y],[v],[u]]
+ * The values of the matrix are signed fixed-point values with a 32-bit fractional part.
+ * @y_offset: Offset to apply on the y value.
+ */
+struct conversion_matrix {
+ s64 matrix[3][3];
+ int y_offset;
+};
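
As a concrete example that is not taken from this patch: for BT.601 full range, the well-known YCbCr-to-RGB relation would populate the matrix with the fixed-point encodings of

	R = 1.0 * Y + 0.0      * Cb + 1.402    * Cr
	G = 1.0 * Y - 0.344136 * Cb - 0.714136 * Cr
	B = 1.0 * Y + 1.772    * Cb + 0.0      * Cr

with y_offset = 0 (a limited-range variant would use y_offset = 16), each coefficient stored as an s64 with a 32-bit fractional part, e.g. via drm_fixp_from_fraction() from drm_fixed.h.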
+
+/**
* struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
* @pixel_read_line: function to read a pixel line in this plane. The creator of a
* struct vkms_plane_state must ensure that this pointer is valid
+ * @conversion_matrix: matrix used to convert YUV formats to RGB
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
pixel_read_line_t pixel_read_line;
+ struct conversion_matrix conversion_matrix;
};
struct vkms_plane {
@@ -197,13 +232,13 @@ struct vkms_config;
* struct vkms_device - Description of a VKMS device
*
* @drm - Base device in DRM
- * @platform - Associated platform device
+ * @faux_dev - Associated faux device
* @output - Configuration and sub-components of the VKMS device
* @config: Configuration used in this VKMS device
*/
struct vkms_device {
struct drm_device drm;
- struct platform_device *platform;
+ struct faux_device *faux_dev;
const struct vkms_config *config;
};
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 30a64ecca87c..6d0227c6635a 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -7,6 +7,8 @@
#include <drm/drm_rect.h>
#include <drm/drm_fixed.h>
+#include <kunit/visibility.h>
+
#include "vkms_formats.h"
/**
@@ -140,6 +142,51 @@ static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
*addr = (u8 *)frame_info->map[0].vaddr + offset;
}
+/**
+ * get_subsampling() - Get the subsampling divisor value in a specific direction
+ *
+ * @format: format to extract the subsampling from
+ * @direction: direction of the subsampling requested
+ */
+static int get_subsampling(const struct drm_format_info *format,
+ enum pixel_read_direction direction)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ return format->vsub;
+ case READ_RIGHT_TO_LEFT:
+ case READ_LEFT_TO_RIGHT:
+ return format->hsub;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 1;
+}
+
+/**
+ * get_subsampling_offset() - An offset for keeping the chroma siting consistent regardless of
+ * x_start and y_start values
+ *
+ * @direction: direction of the read, used to properly compute this offset
+ * @x_start: x coordinate of the starting point of the line being read
+ * @y_start: y coordinate of the starting point of the line being read
+ */
+static int get_subsampling_offset(enum pixel_read_direction direction, int x_start, int y_start)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ return -y_start - 1;
+ case READ_TOP_TO_BOTTOM:
+ return y_start;
+ case READ_RIGHT_TO_LEFT:
+ return -x_start - 1;
+ case READ_LEFT_TO_RIGHT:
+ return x_start;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 0;
+}
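
A worked example of the offset: assume NV12 (hsub = 2) read left to right from x_start = 3. get_subsampling_offset() returns 3, so the read loops below advance their chroma pointer whenever (i + 3 + 1) % 2 == 0, i.e. after the pixel at x = 3, then after x = 5, and so on. The chroma sample therefore changes on the same even-x boundaries it would if the line were read from x = 0, which is exactly the siting consistency this helper exists to preserve.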
+
/*
* The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
* &struct pixel_argb_u16
@@ -202,11 +249,54 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
return out_pixel;
}
+static struct pixel_argb_u16 argb_u16_from_gray8(u8 gray)
+{
+ return argb_u16_from_u8888(255, gray, gray, gray);
+}
+
+static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
+{
+ return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
+}
+
+VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix)
+{
+ u16 r, g, b;
+ s64 fp_y, fp_channel_1, fp_channel_2;
+ s64 fp_r, fp_g, fp_b;
+
+ fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
+ fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
+ fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+
+ fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[0][2], fp_channel_2);
+ fp_g = drm_fixp_mul(matrix->matrix[1][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[1][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[1][2], fp_channel_2);
+ fp_b = drm_fixp_mul(matrix->matrix[2][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[2][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[2][2], fp_channel_2);
+
+ fp_r = drm_fixp2int_round(fp_r);
+ fp_g = drm_fixp2int_round(fp_g);
+ fp_b = drm_fixp2int_round(fp_b);
+
+ r = clamp(fp_r, 0, 0xffff);
+ g = clamp(fp_g, 0, 0xffff);
+ b = clamp(fp_b, 0, 0xffff);
+
+ return argb_u16_from_u16161616(0xffff, r, g, b);
+}
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
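
A note on the scaling above: multiplying by 257 is the exact 8-to-16-bit expansion, since 0xff * 257 = 0xffff; it is equivalent to replicating the byte into both halves of a u16, (v << 8) | v.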
+
/*
* The following functions are read_line function for each pixel format supported by VKMS.
*
* They read a line starting at the point @x_start,@y_start following the @direction. The result
- * is stored in @out_pixel and in the format ARGB16161616.
+ * is stored in @out_pixel, in a 64-bit format (see struct pixel_argb_u16).
*
* These functions are very repetitive, but the innermost pixel loops must be kept inside these
* functions for performance reasons. Some benchmarking was done in [1] where having the innermost
@@ -215,6 +305,96 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
* [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
+static void Rx_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ int bits_per_pixel = drm_format_info_bpp(plane->frame_info->fb->format, 0);
+ u8 *src_pixels;
+ int rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_height(plane->frame_info->fb->format, 0) != 1,
+ "%s() only support formats with block_h == 1", __func__);
+
+ packed_pixels_addr(plane->frame_info, x_start, y_start, 0, &src_pixels, &rem_x, &rem_y);
+ int bit_offset = (8 - bits_per_pixel) - rem_x * bits_per_pixel;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int mask = (0x1 << bits_per_pixel) - 1;
+ int lum_per_level = 0xFFFF / mask;
+
+ if (direction == READ_LEFT_TO_RIGHT || direction == READ_RIGHT_TO_LEFT) {
+ int restart_bit_offset;
+ int step_bit_offset;
+
+ if (direction == READ_LEFT_TO_RIGHT) {
+ restart_bit_offset = 8 - bits_per_pixel;
+ step_bit_offset = -bits_per_pixel;
+ } else {
+ restart_bit_offset = 0;
+ step_bit_offset = bits_per_pixel;
+ }
+
+ while (out_pixel < end) {
+ u8 val = ((*src_pixels) >> bit_offset) & mask;
+
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+
+ bit_offset += step_bit_offset;
+ if (bit_offset < 0 || 8 <= bit_offset) {
+ bit_offset = restart_bit_offset;
+ src_pixels += step;
+ }
+ out_pixel += 1;
+ }
+ } else if (direction == READ_TOP_TO_BOTTOM || direction == READ_BOTTOM_TO_TOP) {
+ while (out_pixel < end) {
+ u8 val = (*src_pixels >> bit_offset) & mask;
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+ }
+}
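
The luminance scaling in Rx_read_line() is exact at the extremes: for R1, mask = 1 and lum_per_level = 0xffff; for R2, mask = 3 and lum_per_level = 0x5555, giving the levels 0x0000/0x5555/0xaaaa/0xffff; for R4, mask = 15 and lum_per_level = 0x1111, the familiar nibble-replication values.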
+
+static void R1_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R2_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ while (out_pixel < end) {
+ *out_pixel = argb_u16_from_gray8(*src_pixels);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+}
+
static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
enum pixel_read_direction direction, int count,
struct pixel_argb_u16 out_pixel[])
@@ -332,6 +512,92 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
}
/*
+ * This callback can be used for YUV formats where U and V values are
+ * stored in the same plane (often called semi-planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *uv_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &uv_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
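+ /*
+ * The UV pointer advances only once every @subsampling pixels;
+ * @subsampling_offset accounts for an x_start/y_start that is not
+ * aligned to a subsampling block boundary.
+ */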
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0)
+ uv_plane += step_uv;
+ }
+}
+
+/*
+ * This callback can be used for YUV formats where each color component is
+ * stored in a different plane (often called planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *channel_1_plane;
+ u8 *channel_2_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &channel_1_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 2,
+ &channel_2_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_channel_1 = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int step_channel_2 = get_block_step_bytes(plane->frame_info->fb, direction, 2);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0) {
+ channel_1_plane += step_channel_1;
+ channel_2_plane += step_channel_2;
+ }
+ }
+}
+
+/*
* The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
* The result is stored in @out_pixel.
*
@@ -456,6 +722,28 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ return &semi_planar_yuv_read_line;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return &planar_yuv_read_line;
+ case DRM_FORMAT_R1:
+ return &R1_read_line;
+ case DRM_FORMAT_R2:
+ return &R2_read_line;
+ case DRM_FORMAT_R4:
+ return &R4_read_line;
+ case DRM_FORMAT_R8:
+ return &R8_read_line;
default:
/*
* This is a bug in vkms_plane_atomic_check(). All the supported
@@ -469,6 +757,183 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
}
}
+/*
+ * These matrices were generated using the colour Python framework
+ *
+ * Below are the function calls used to generate each matrix; go to
+ * https://colour.readthedocs.io/en/develop/generated/colour.matrix_YCbCr.html
+ * for more info:
+ *
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix no_operation = {
+ .matrix = {
+ { 4294967296, 0, 0, },
+ { 0, 4294967296, 0, },
+ { 0, 0, 4294967296, },
+ },
+ .y_offset = 0,
+};
+
+static const struct conversion_matrix yuv_bt601_full = {
+ .matrix = {
+ { 4294967296, 0, 6021544149 },
+ { 4294967296, -1478054095, -3067191994 },
+ { 4294967296, 7610682049, 0 },
+ },
+ .y_offset = 0,
+};
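+
+/*
+ * The matrix entries are fixed point scaled by 2^32, as in the numpy calls
+ * above. As a worked check of the convention: the BT.601 full-range
+ * Cr-to-R coefficient is 1.402, and round(1.402 * 2^32) = 6021544149,
+ * which is the [0][2] entry of yuv_bt601_full.
+ */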
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt601_limited = {
+ .matrix = {
+ { 5020601039, 0, 6881764740 },
+ { 5020601039, -1689204679, -3505362278 },
+ { 5020601039, 8697922339, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_full = {
+ .matrix = {
+ { 4294967296, 0, 6763714498 },
+ { 4294967296, -804551626, -2010578443 },
+ { 4294967296, 7969741314, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_limited = {
+ .matrix = {
+ { 5020601039, 0, 7729959424 },
+ { 5020601039, -919487572, -2297803934 },
+ { 5020601039, 9108275786, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_full = {
+ .matrix = {
+ { 4294967296, 0, 6333358775 },
+ { 4294967296, -706750298, -2453942994 },
+ { 4294967296, 8080551471, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_limited = {
+ .matrix = {
+ { 5020601039, 0, 7238124312 },
+ { 5020601039, -807714626, -2804506279 },
+ { 5020601039, 9234915964, 0 },
+ },
+ .y_offset = 16,
+};
+
+/**
+ * swap_uv_columns() - Swap the U and V columns of a given matrix
+ *
+ * @matrix: Matrix in which the columns are swapped
+ */
+static void swap_uv_columns(struct conversion_matrix *matrix)
+{
+ swap(matrix->matrix[0][2], matrix->matrix[0][1]);
+ swap(matrix->matrix[1][2], matrix->matrix[1][1]);
+ swap(matrix->matrix[2][2], matrix->matrix[2][1]);
+}
+
+/**
+ * get_conversion_matrix_to_argb_u16() - Retrieve the correct YUV-to-RGB conversion matrix for a
+ * given encoding and range.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion matrix (see drm_fourcc.h)
+ * @encoding: DRM_COLOR_* value for which to obtain a conversion matrix
+ * @range: DRM_COLOR_*_RANGE value for which to obtain a conversion matrix
+ * @matrix: Pointer to store the matrix into
+ */
+void get_conversion_matrix_to_argb_u16(u32 format,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix)
+{
+ const struct conversion_matrix *matrix_to_copy;
+ bool limited_range;
+
+ switch (range) {
+ case DRM_COLOR_YCBCR_LIMITED_RANGE:
+ limited_range = true;
+ break;
+ case DRM_COLOR_YCBCR_FULL_RANGE:
+ limited_range = false;
+ break;
+ case DRM_COLOR_RANGE_MAX:
+ limited_range = false;
+ WARN_ONCE(true, "The requested range is not supported.");
+ break;
+ }
+
+ switch (encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ matrix_to_copy = limited_range ? &yuv_bt601_limited :
+ &yuv_bt601_full;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ matrix_to_copy = limited_range ? &yuv_bt709_limited :
+ &yuv_bt709_full;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ matrix_to_copy = limited_range ? &yuv_bt2020_limited :
+ &yuv_bt2020_full;
+ break;
+ case DRM_COLOR_ENCODING_MAX:
+ matrix_to_copy = &no_operation;
+ WARN_ONCE(true, "The requested encoding is not supported.");
+ break;
+ }
+
+ memcpy(matrix, matrix_to_copy, sizeof(*matrix_to_copy));
+
+ switch (format) {
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ swap_uv_columns(matrix);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(get_conversion_matrix_to_argb_u16);
+
/**
* get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format.
* The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index 8d2bef95ff79..b4fe62ab9c65 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -9,4 +9,13 @@ pixel_read_line_t get_pixel_read_line_function(u32 format);
pixel_write_t get_pixel_write_function(u32 format);
+void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix);
+#endif
+
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e34f8c7f83c3..e3fdd161d0f0 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -18,7 +18,23 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
- DRM_FORMAT_RGB565
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV42,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ DRM_FORMAT_R4,
+ DRM_FORMAT_R8,
};
static struct drm_plane_state *
@@ -119,6 +135,8 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info->rotation = new_state->rotation;
vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
+ get_conversion_matrix_to_argb_u16(fmt, new_state->color_encoding, new_state->color_range,
+ &vkms_plane_state->conversion_matrix);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -205,5 +223,14 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK);
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index dd4ca6a9c690..8fe02131a6c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv);
+ vmw_fences_update(dev_priv->fman);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 5205552b1970..8ff958d119be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -440,8 +440,10 @@ static int vmw_device_init(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
- dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ u32 seqno = vmw_fence_read(dev_priv);
+
+ atomic_set(&dev_priv->last_read_seqno, seqno);
+ atomic_set(&dev_priv->marker_seq, seqno);
return 0;
}
@@ -454,7 +456,7 @@ static void vmw_device_fini(struct vmw_private *vmw)
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
- vmw->last_read_seqno = vmw_fence_read(vmw);
+ atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
@@ -713,7 +715,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
pci_set_master(pdev);
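+ /* pcim-managed request: the regions are released automatically on
+ * driver detach, which removes the need for explicit
+ * pci_release_regions() calls on the error and teardown paths.
+ */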
- ret = pci_request_regions(pdev, "vmwgfx probe");
+ ret = pcim_request_all_regions(pdev, "vmwgfx probe");
if (ret)
return ret;
@@ -733,7 +735,6 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (!dev->rmmio) {
drm_err(&dev->drm,
"Failed mapping registers mmio memory.\n");
- pci_release_regions(pdev);
return -ENOMEM;
}
} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
@@ -754,11 +755,9 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
"Failed mapping FIFO memory.\n");
- pci_release_regions(pdev);
return PTR_ERR(dev->fifo_mem);
}
} else {
- pci_release_regions(pdev);
return -EINVAL;
}
@@ -836,7 +835,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
vmw_sw_context_init(dev_priv);
@@ -852,7 +850,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
return ret;
ret = vmw_detect_version(dev_priv);
if (ret)
- goto out_no_pci_or_version;
+ return ret;
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -1152,15 +1150,13 @@ out_err0:
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-out_no_pci_or_version:
- pci_release_regions(pdev);
+
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1196,8 +1192,6 @@ static void vmw_driver_unload(struct drm_device *dev)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
-
- pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 594af8eb04c6..eda5b6f8f4c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -522,7 +522,7 @@ struct vmw_private {
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
- uint32_t last_read_seqno;
+ atomic_t last_read_seqno;
struct vmw_fence_manager *fman;
uint32_t irq_mask; /* Updates protected by waiter_lock */
@@ -1006,15 +1006,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
- int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
- u32 flag, int *waiter_count);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e831e324e737..819704ac675d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
}
/*
@@ -4068,23 +4067,6 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
-/*
- * DMA fence callback to remove a seqno_waiter
- */
-struct seqno_waiter_rm_context {
- struct dma_fence_cb base;
- struct vmw_private *dev_priv;
-};
-
-static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct seqno_waiter_rm_context *ctx =
- container_of(cb, struct seqno_waiter_rm_context, base);
-
- vmw_seqno_waiter_remove(ctx->dev_priv);
- kfree(ctx);
-}
-
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4265,15 +4247,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
- struct seqno_waiter_rm_context *ctx =
- kmalloc(sizeof(*ctx), GFP_KERNEL);
- ctx->dev_priv = dev_priv;
- vmw_seqno_waiter_add(dev_priv);
- if (dma_fence_add_callback(&fence->base, &ctx->base,
- seqno_waiter_rm_cb) < 0) {
- vmw_seqno_waiter_remove(dev_priv);
- kfree(ctx);
- }
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 588d50ababf6..c2294abbe753 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,32 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-#include <linux/sched/signal.h>
-
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -35,14 +14,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work;
bool fifo_down;
- struct list_head cleanup_list;
- uint32_t pending_actions[VMW_ACTION_MAX];
- struct mutex goal_irq_mutex;
- bool goal_irq_on; /* Protected by @goal_irq_mutex */
- bool seqno_valid; /* Protected by @lock, and may not be set to true
- without the @goal_irq_mutex held. */
u64 ctx;
};
@@ -52,12 +24,10 @@ struct vmw_user_fence {
};
/**
- * struct vmw_event_fence_action - fence action that delivers a drm event.
+ * struct vmw_event_fence_action - fence callback that delivers a DRM event.
*
- * @action: A struct vmw_fence_action to hook up to a fence.
+ * @base: For use with dma_fence_add_callback(...)
* @event: A pointer to the pending event.
- * @fence: A referenced pointer to the fence to keep it alive while @action
- * hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
* @tv_sec: If non-null, the variable pointed to will be assigned
* current time tv_sec val when the fence signals.
@@ -65,10 +35,9 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct vmw_fence_action action;
+ struct dma_fence_cb base;
struct drm_pending_event *event;
- struct vmw_fence_obj *fence;
struct drm_device *dev;
uint32_t *tv_sec;
@@ -81,44 +50,6 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
-static u32 vmw_fence_goal_read(struct vmw_private *vmw)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
- else
- return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
-}
-
-static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
- else
- vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
-}
-
-/*
- * Note on fencing subsystem usage of irqs:
- * Typically the vmw_fences_update function is called
- *
- * a) When a new fence seqno has been submitted by the fifo code.
- * b) On-demand when we have waiters. Sleeping waiters will switch on the
- * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
- * irq is received. When the last fence waiter is gone, that IRQ is masked
- * away.
- *
- * In situations where there are no waiters and we don't submit any new fences,
- * fence objects may not be signaled. This is perfectly OK, since there are
- * no consumers of the signaled data, but that is NOT ok when there are fence
- * actions attached to a fence. The fencing subsystem then makes use of the
- * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
- * which has an action attached, and each time vmw_fences_update is called,
- * the subsystem makes sure the fence goal seqno is updated.
- *
- * The fence goal seqno irq is on as long as there are unsignaled fence
- * objects with actions attached to them.
- */
-
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
@@ -126,8 +57,21 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (!list_empty(&fence->head)) {
+ /* The fence manager still holds an implicit reference to this
+ * fence via the fence list if head is set. Because the lock must
+ * be held when the fence manager updates the fence list, either
+ * the fence will have been removed by the time we take the lock
+ * below, or we can safely remove it and the fence manager will
+ * never see it. This implies the fence is being deleted without
+ * being signaled, which is dubious but valid if there are no
+ * callbacks. The dma_fence code that calls this hook already
+ * warns about fences deleted unsignaled with callbacks attached,
+ * so there is no need to warn again here.
+ */
spin_lock(&fman->lock);
list_del_init(&fence->head);
+ if (fence->waiter_added)
+ vmw_seqno_waiter_remove(fman->dev_priv);
spin_unlock(&fman->lock);
}
fence->destroy(fence);
@@ -143,165 +87,46 @@ static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
return "svga";
}
+/* When we toggle signaling for the SVGA device there is a race window from
+ * the time we first read the fence seqno to the time we enable interrupts.
+ * If we miss the interrupt for a fence during this window, it's likely the
+ * driver will stall. As a result we need to re-read the seqno after
+ * interrupts are enabled. If interrupts were already enabled we just
+ * increment the number of seqno waiters.
+ */
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
+ u32 seqno;
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
-
- u32 seqno = vmw_fence_read(dev_priv);
- if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+check_for_race:
+ seqno = vmw_fence_read(dev_priv);
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(dev_priv);
+ fence->waiter_added = false;
+ }
return false;
-
+ } else if (!fence->waiter_added) {
+ fence->waiter_added = true;
+ if (vmw_seqno_waiter_add(dev_priv))
+ goto check_for_race;
+ }
return true;
}
-struct vmwgfx_wait_cb {
- struct dma_fence_cb base;
- struct task_struct *task;
-};
-
-static void
-vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct vmwgfx_wait_cb *wait =
- container_of(cb, struct vmwgfx_wait_cb, base);
-
- wake_up_process(wait->task);
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
-
-static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
-{
- struct vmw_fence_obj *fence =
- container_of(f, struct vmw_fence_obj, base);
-
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct vmw_private *dev_priv = fman->dev_priv;
- struct vmwgfx_wait_cb cb;
- long ret = timeout;
-
- if (likely(vmw_fence_obj_signaled(fence)))
- return timeout;
-
- vmw_seqno_waiter_add(dev_priv);
-
- spin_lock(f->lock);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- goto out;
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- cb.base.func = vmwgfx_wait_cb;
- cb.task = current;
- list_add(&cb.base.node, &f->cb_list);
-
- for (;;) {
- __vmw_fences_update(fman);
-
- /*
- * We can use the barrier free __set_current_state() since
- * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
- * fence spinlock.
- */
- if (intr)
- __set_current_state(TASK_INTERRUPTIBLE);
- else
- __set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
- if (ret == 0 && timeout > 0)
- ret = 1;
- break;
- }
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (ret == 0)
- break;
-
- spin_unlock(f->lock);
-
- ret = schedule_timeout(ret);
-
- spin_lock(f->lock);
- }
- __set_current_state(TASK_RUNNING);
- if (!list_empty(&cb.base.node))
- list_del(&cb.base.node);
-
-out:
- spin_unlock(f->lock);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- return ret;
-}
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
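+
+/*
+ * Note there is no longer a custom .wait callback: vmw_fence_ops leaves it
+ * NULL, so waiters take the dma_fence_default_wait() path and rely on
+ * enable_signaling() above arming the ANY_FENCE interrupt.
+ */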
static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
- .wait = vmw_fence_wait,
.release = vmw_fence_obj_destroy,
};
-/*
- * Execute signal actions on fences recently signaled.
- * This is done from a workqueue so we don't have to execute
- * signal actions from atomic context.
- */
-
-static void vmw_fence_work_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, work);
- struct list_head list;
- struct vmw_fence_action *action, *next_action;
- bool seqno_valid;
-
- do {
- INIT_LIST_HEAD(&list);
- mutex_lock(&fman->goal_irq_mutex);
-
- spin_lock(&fman->lock);
- list_splice_init(&fman->cleanup_list, &list);
- seqno_valid = fman->seqno_valid;
- spin_unlock(&fman->lock);
-
- if (!seqno_valid && fman->goal_irq_on) {
- fman->goal_irq_on = false;
- vmw_goal_waiter_remove(fman->dev_priv);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
- if (list_empty(&list))
- return;
-
- /*
- * At this point, only we should be able to manipulate the
- * list heads of the actions we have on the private list.
- * hence fman::lock not held.
- */
-
- list_for_each_entry_safe(action, next_action, &list, head) {
- list_del_init(&action->head);
- if (action->cleanup)
- action->cleanup(action);
- }
- } while (1);
-}
-
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
@@ -312,10 +137,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->dev_priv = dev_priv;
spin_lock_init(&fman->lock);
INIT_LIST_HEAD(&fman->fence_list);
- INIT_LIST_HEAD(&fman->cleanup_list);
- INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
return fman;
@@ -325,11 +147,8 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
bool lists_empty;
- (void) cancel_work_sync(&fman->work);
-
spin_lock(&fman->lock);
- lists_empty = list_empty(&fman->fence_list) &&
- list_empty(&fman->cleanup_list);
+ lists_empty = list_empty(&fman->fence_list);
spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
@@ -344,7 +163,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
fman->ctx, seqno);
- INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
spin_lock(&fman->lock);
@@ -352,6 +170,11 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
ret = -EBUSY;
goto out_unlock;
}
+ /* This creates an implicit reference to the fence from the fence
+ * manager. It will be dropped when the fence is signaled, which is
+ * expected to happen before deletion. The dtor has code to catch the
+ * rare case of deletion before signaling.
+ */
list_add_tail(&fence->head, &fman->fence_list);
out_unlock:
@@ -360,148 +183,35 @@ out_unlock:
}
-static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
- struct list_head *list)
-{
- struct vmw_fence_action *action, *next_action;
-
- list_for_each_entry_safe(action, next_action, list, head) {
- list_del_init(&action->head);
- fman->pending_actions[action->type]--;
- if (action->seq_passed != NULL)
- action->seq_passed(action);
-
- /*
- * Add the cleanup action to the cleanup list so that
- * it will be performed by a worker task.
- */
-
- list_add_tail(&action->head, &fman->cleanup_list);
- }
-}
-
-/**
- * vmw_fence_goal_new_locked - Figure out a new device fence goal
- * seqno if needed.
- *
- * @fman: Pointer to a fence manager.
- * @passed_seqno: The seqno the device currently signals as passed.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when we have a new passed_seqno, and
- * we might need to update the fence goal. It checks to see whether
- * the current fence goal has already passed, and, in that case,
- * scans through all unsignaled fences to get the next fence object with an
- * action attached, and sets the seqno of that fence as a new fence goal.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
- u32 passed_seqno)
-{
- u32 goal_seqno;
- struct vmw_fence_obj *fence, *next_fence;
-
- if (likely(!fman->seqno_valid))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
- return false;
-
- fman->seqno_valid = false;
- list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (!list_empty(&fence->seq_passed_actions)) {
- fman->seqno_valid = true;
- vmw_fence_goal_write(fman->dev_priv,
- fence->base.seqno);
- break;
- }
- }
-
- return true;
-}
-
-
-/**
- * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
- * needed.
- *
- * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
- * considered as a device fence goal.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when an action has been attached to a fence to
- * check whether the seqno of that fence should be used for a fence
- * goal interrupt. This is typically needed if the current fence goal is
- * invalid, or has a higher seqno than that of the current fence object.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- u32 goal_seqno;
-
- if (dma_fence_is_signaled_locked(&fence->base))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(fman->seqno_valid &&
- goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
- return false;
-
- vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
- fman->seqno_valid = true;
-
- return true;
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_obj *fence, *next_fence;
- struct list_head action_list;
- bool needs_rerun;
- uint32_t seqno, new_seqno;
+ const bool cookie = dma_fence_begin_signalling();
+ const u32 seqno = vmw_fence_read(fman->dev_priv);
- seqno = vmw_fence_read(fman->dev_priv);
-rerun:
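+ /*
+ * The begin/end signalling pair marks this as a fence-signalling
+ * critical section for lockdep, and the release store of
+ * last_read_seqno below pairs with the atomic_read_acquire() in
+ * vmw_seqno_passed().
+ */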
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(fman->dev_priv);
+ fence->waiter_added = false;
+ }
dma_fence_signal_locked(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
} else
break;
}
-
- /*
- * Rerun if the fence goal seqno was updated, and the
- * hardware might have raced with that update, so that
- * we missed a fence_goal irq.
- */
-
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
- if (unlikely(needs_rerun)) {
- new_seqno = vmw_fence_read(fman->dev_priv);
- if (new_seqno != seqno) {
- seqno = new_seqno;
- goto rerun;
- }
- }
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
+ dma_fence_end_signalling(cookie);
+ atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+ return seqno;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
+ u32 seqno;
spin_lock(&fman->lock);
- __vmw_fences_update(fman);
+ seqno = __vmw_fences_update(fman);
spin_unlock(&fman->lock);
+ return seqno;
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -539,14 +249,13 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(!fence))
return -ENOMEM;
- ret = vmw_fence_obj_init(fman, fence, seqno,
- vmw_fence_destroy);
+ ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
@@ -638,7 +347,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- struct list_head action_list;
int ret;
/*
@@ -661,10 +369,6 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
dma_fence_signal(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
}
BUG_ON(!list_empty(&fence->head));
@@ -778,7 +482,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -787,14 +490,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fman_from_fence(fence);
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock(&fman->lock);
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock(&fman->lock);
+ arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
ttm_base_object_unref(&base);
@@ -822,10 +522,11 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context.
*/
-static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
+ container_of(cb, struct vmw_event_fence_action, base);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
@@ -837,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_get_ts64(&ts);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -846,75 +547,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
spin_unlock_irq(&dev->event_lock);
-}
-
-/**
- * vmw_event_fence_action_cleanup
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is the struct vmw_fence_action destructor. It's typically
- * called from a workqueue.
- */
-static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
-
- vmw_fence_obj_unreference(&eaction->fence);
+ dma_fence_put(f);
kfree(eaction);
}
-
-/**
- * vmw_fence_obj_add_action - Add an action to a fence object.
- *
- * @fence: The fence object.
- * @action: The action to add.
- *
- * Note that the action callbacks may be executed before this function
- * returns.
- */
-static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
- struct vmw_fence_action *action)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- bool run_update = false;
-
- mutex_lock(&fman->goal_irq_mutex);
- spin_lock(&fman->lock);
-
- fman->pending_actions[action->type]++;
- if (dma_fence_is_signaled_locked(&fence->base)) {
- struct list_head action_list;
-
- INIT_LIST_HEAD(&action_list);
- list_add_tail(&action->head, &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- } else {
- list_add_tail(&action->head, &fence->seq_passed_actions);
-
- /*
- * This function may set fman::seqno_valid, so it must
- * be run with the goal_irq_mutex held.
- */
- run_update = vmw_fence_goal_check_locked(fence);
- }
-
- spin_unlock(&fman->lock);
-
- if (run_update) {
- if (!fman->goal_irq_on) {
- fman->goal_irq_on = true;
- vmw_goal_waiter_add(fman->dev_priv);
- }
- vmw_fences_update(fman);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
-}
-
/**
* vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
@@ -949,18 +585,14 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
return -ENOMEM;
eaction->event = event;
-
- eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
- eaction->action.cleanup = vmw_event_fence_action_cleanup;
- eaction->action.type = VMW_ACTION_EVENT;
-
- eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- vmw_fence_obj_add_action(fence, &eaction->action);
-
+ vmw_fence_obj_reference(fence); /* Dropped in CB */
+ if (dma_fence_add_callback(&fence->base, &eaction->base,
+ vmw_event_fence_action_seq_passed) < 0)
+ vmw_event_fence_action_seq_passed(&fence->base, &eaction->base);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index a7eee579c76a..e897cccae1ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -39,27 +39,10 @@ struct drm_pending_event;
struct vmw_private;
struct vmw_fence_manager;
-/**
- *
- *
- */
-enum vmw_action_type {
- VMW_ACTION_EVENT = 0,
- VMW_ACTION_MAX
-};
-
-struct vmw_fence_action {
- struct list_head head;
- enum vmw_action_type type;
- void (*seq_passed) (struct vmw_fence_action *action);
- void (*cleanup) (struct vmw_fence_action *action);
-};
-
struct vmw_fence_obj {
struct dma_fence base;
-
+ bool waiter_added;
struct list_head head;
- struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
};
@@ -86,7 +69,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
return fence;
}
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index e417921af584..eedf1fe60be7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -284,11 +284,10 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->tbo.base.size, placement, type);
- seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d",
bo->tbo.priority,
bo->tbo.pin_count,
- kref_read(&bo->tbo.base.refcount),
- kref_read(&bo->tbo.kref));
+ kref_read(&bo->tbo.base.refcount));
seq_puts(m, "\n");
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d4..05773eb394d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
@@ -239,51 +230,59 @@ out_err:
return ret;
}
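+/*
+ * Returns true when the call actually reprogrammed the device IRQ mask,
+ * i.e. the waiter count transitioned between zero and non-zero. Callers
+ * such as vmw_fence_enable_signaling() use this to detect that interrupts
+ * were only just enabled and that the seqno must be re-read.
+ */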
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
-void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
static void vmw_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 05b1c54a070c..54ea1b513950 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -500,6 +500,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -548,7 +549,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd);
memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
vmw_user_object_ref(&vfbs->uo);
@@ -602,6 +603,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -634,7 +636,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
}
vfbd->base.base.obj[0] = &bo->tbo.base;
- drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
*out = &vfbd->base;
@@ -679,11 +681,13 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* @dev_priv: Pointer to device private struct.
* @uo: Pointer to user object to wrap the kms framebuffer around.
* Either the buffer or surface inside the user object must be NULL.
+ * @info: pixel format information.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
@@ -692,10 +696,10 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
/* Create the new framebuffer depending one what we have */
if (vmw_user_object_surface(uo)) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else if (uo->buffer) {
ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else {
BUG();
}
@@ -712,6 +716,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -741,7 +746,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 511e29cdb987..445471fe9be6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -399,6 +399,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 7fb1c88bcc47..69dfe69ce0f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true,
+ .pin = false,
.keep_resv = true,
};
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 99a91355842e..2bb2bc052120 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -32,7 +32,6 @@ config DRM_XE
select ACPI_VIDEO if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
- select IOSF_MBI
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
@@ -46,6 +45,7 @@ config DRM_XE
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
select HMM_MIRROR
+ select REGMAP if I2C
help
Driver for Intel Xe2 series GPUs and later. Experimental support
for Xe series is also available.
@@ -87,16 +87,18 @@ config DRM_XE_GPUSVM
Enable this option if you want support for CPU to GPU address
mirroring.
- If in doubut say "Y".
+ If in doubt say "Y".
-config DRM_XE_DEVMEM_MIRROR
- bool "Enable device memory mirror"
+config DRM_XE_PAGEMAP
+ bool "Enable device memory pool for SVM"
depends on DRM_XE_GPUSVM
select GET_FREE_REGION
default y
help
- Disable this option only if you want to compile out without device
- memory mirror. Will reduce KMD memory footprint when disabled.
+ Disable this option only if you don't want to expose local device
+ memory for SVM. Will reduce KMD memory footprint when disabled.
+
+ If in doubt say "Y".
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 0d749ed44878..01735c6ece8b 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -86,12 +86,17 @@ config DRM_XE_KUNIT_TEST
If in doubt, say "N".
-config DRM_XE_LARGE_GUC_BUFFER
- bool "Enable larger guc log buffer"
+config DRM_XE_DEBUG_GUC
+ bool "Enable extra GuC related debug options"
+ depends on DRM_XE_DEBUG
default n
+ select STACKDEPOT
help
Choose this option when debugging guc issues.
- Buffer should be large enough for complex issues.
+ The GuC log buffer is increased to the maximum allowed, which should
+ be large enough for complex issues. The tracking of FAST_REQ messages
+ is extended to include a record of the calling stack, which is then
+ dumped on a FAST_REQ error notification.
Recommended for driver developers only.
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e4bf484d4121..07c71a29963d 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -21,6 +21,13 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
+generated_device_oob := $(obj)/generated/xe_device_wa_oob.c $(obj)/generated/xe_device_wa_oob.h
+quiet_cmd_device_wa_oob = GEN $(notdir $(generated_device_oob))
+ cmd_device_wa_oob = mkdir -p $(@D); $^ $(generated_device_oob)
+$(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe_gen_wa_oob \
+ $(src)/xe_device_wa_oob.rules
+ $(call cmd,device_wa_oob)
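+
+# As with the xe_wa_oob rules above, the generated header is made a build
+# dependency of every xe object via uses_generated_oob at the bottom of
+# this Makefile.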
+
# Please keep these build lists sorted!
# core driver code
@@ -80,6 +87,7 @@ xe-y += xe_bb.o \
xe_mmio.o \
xe_mocs.o \
xe_module.o \
+ xe_nvm.o \
xe_oa.o \
xe_observation.o \
xe_pat.o \
@@ -124,6 +132,7 @@ xe-y += xe_bb.o \
xe_wait_user_fence.o \
xe_wopcm.o
+xe-$(CONFIG_I2C) += xe_i2c.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
@@ -139,7 +148,8 @@ xe-y += \
xe_guc_relay.o \
xe_memirq.o \
xe_sriov.o \
- xe_sriov_vf.o
+ xe_sriov_vf.o \
+ xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
@@ -153,7 +163,8 @@ xe-$(CONFIG_PCI_IOV) += \
xe_lmtt_2l.o \
xe_lmtt_ml.o \
xe_pci_sriov.o \
- xe_sriov_pf.o
+ xe_sriov_pf.o \
+ xe_sriov_pf_service.o
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
@@ -204,7 +215,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/icl_dsi.o \
i915-display/intel_alpm.o \
i915-display/intel_atomic.o \
- i915-display/intel_atomic_plane.o \
i915-display/intel_audio.o \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
@@ -254,6 +264,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_fbc.o \
i915-display/intel_fdi.o \
i915-display/intel_fifo_underrun.o \
+ i915-display/intel_flipq.o \
i915-display/intel_frontbuffer.o \
i915-display/intel_global_state.o \
i915-display/intel_gmbus.o \
@@ -270,6 +281,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
i915-display/intel_pfit.o \
+ i915-display/intel_plane.o \
i915-display/intel_pmdemand.o \
i915-display/intel_pch.o \
i915-display/intel_pps.o \
@@ -337,4 +349,4 @@ $(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
-$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h $(obj)/generated/xe_device_wa_oob.h
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 448afb86e05c..81eb046aeebf 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -142,6 +142,7 @@ enum xe_guc_action {
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV = 0x550E,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
@@ -161,6 +162,37 @@ enum xe_guc_preempt_options {
XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
};
+enum xe_guc_register_context_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_DATA_2_CONTEXT_INDEX,
+ XE_GUC_REGISTER_CONTEXT_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MSG_LEN,
+};
+
+enum xe_guc_register_context_multi_lrc_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_2_PARENT_CONTEXT,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
+};
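+
+/*
+ * Since the offsets start at 0, XE_GUC_REGISTER_CONTEXT_MSG_LEN above also
+ * gives the single-LRC H2G message length in dwords. The multi-LRC variant
+ * pins only a minimum length, presumably because the LRC address list
+ * grows with the NUM_CTXS value.
+ */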
+
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
@@ -240,4 +272,7 @@ enum xe_guc_g2g_type {
#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+/* invalid type for XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR */
+#define XE_GUC_CAT_ERR_TYPE_INVALID 0xdeadbeef
+
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index 2c627a21648f..ecf748fd87df 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -6,8 +6,7 @@
#ifndef _ABI_GUC_ERRORS_ABI_H
#define _ABI_GUC_ERRORS_ABI_H
-enum xe_guc_response_status {
- XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+enum xe_guc_response {
XE_GUC_RESPONSE_ERROR_PROTOCOL = 0x04,
XE_GUC_RESPONSE_INVALID_STATE = 0x0A,
XE_GUC_RESPONSE_UNSUPPORTED_VERSION = 0x0B,
@@ -21,12 +20,20 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CANNOT_COMPLETE_ACTION = 0x41,
XE_GUC_RESPONSE_INVALID_KLV_DATA = 0x50,
XE_GUC_RESPONSE_INVALID_PARAMS = 0x60,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_INDEX = 0x61,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_REGISTRATION = 0x62,
+ XE_GUC_RESPONSE_INVALID_DOORBELL_ID = 0x63,
+ XE_GUC_RESPONSE_INVALID_ENGINE_ID = 0x64,
XE_GUC_RESPONSE_INVALID_BUFFER_RANGE = 0x70,
XE_GUC_RESPONSE_INVALID_BUFFER = 0x71,
+ XE_GUC_RESPONSE_BUFFER_ALREADY_REGISTERED = 0x72,
XE_GUC_RESPONSE_INVALID_GGTT_ADDRESS = 0x80,
XE_GUC_RESPONSE_PENDING_ACTION = 0x90,
+ XE_GUC_RESPONSE_CONTEXT_NOT_REGISTERED = 0x100,
+ XE_GUC_RESPONSE_CONTEXT_ALREADY_REGISTERED = 0x101,
XE_GUC_RESPONSE_INVALID_SIZE = 0x102,
XE_GUC_RESPONSE_MALFORMED_KLV = 0x103,
+ XE_GUC_RESPONSE_INVALID_CONTEXT = 0x104,
XE_GUC_RESPONSE_INVALID_KLV_KEY = 0x105,
XE_GUC_RESPONSE_DATA_TOO_LARGE = 0x106,
XE_GUC_RESPONSE_VF_MIGRATED = 0x107,
@@ -40,10 +47,11 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CTB_NOT_REGISTERED = 0x304,
XE_GUC_RESPONSE_CTB_IN_USE = 0x305,
XE_GUC_RESPONSE_CTB_INVALID_DESC = 0x306,
+ XE_GUC_RESPONSE_HW_TIMEOUT = 0x30C,
XE_GUC_RESPONSE_CTB_SOURCE_INVALID_DESCRIPTOR = 0x30D,
XE_GUC_RESPONSE_CTB_DESTINATION_INVALID_DESCRIPTOR = 0x30E,
XE_GUC_RESPONSE_INVALID_CONFIG_STATE = 0x30F,
- XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+ XE_GUC_RESPONSE_GENERIC_FAIL = 0xF000,
};
enum xe_guc_load_status {
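
A hedged sketch of consuming the renamed xe_guc_response enum. The errno mapping below is illustrative only and is not the driver's actual policy.

/* Sketch: translate a few response codes into errno values. */
static int guc_response_to_errno_sketch(enum xe_guc_response status)
{
	switch (status) {
	case XE_GUC_RESPONSE_ERROR_PROTOCOL:
		return -EPROTO;		/* malformed exchange */
	case XE_GUC_RESPONSE_INVALID_SIZE:
	case XE_GUC_RESPONSE_INVALID_PARAMS:
		return -EINVAL;		/* caller passed bad arguments */
	case XE_GUC_RESPONSE_HW_TIMEOUT:
		return -ETIMEDOUT;
	case XE_GUC_RESPONSE_PENDING_ACTION:
		return -EBUSY;
	default:
		return -EIO;		/* anything else is a hard failure */
	}
}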
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 7de8f827281f..0366a9da5977 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -16,6 +16,7 @@
* +===+=======+==============================================================+
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
+ * | | | - `GuC Opt In Feature KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -125,6 +126,33 @@ enum {
};
/**
+ * DOC: GuC Opt In Feature KLVs
+ *
+ * `GuC KLV`_ keys available for use with OPT_IN_FEATURE_KLV
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE` : 0x4001
+ * Adds an extra dword to the XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR G2H
+ * containing the type of the CAT error. On HW that does not support
+ * reporting the CAT error type, the extra dword is set to 0xdeadbeef.
+ *
+ * _`GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH` : 0x4003
+ * This KLV enables the Dynamic Inhibit Context Switch optimization,
+ * whereby the GuC sets the CTX_CTRL_INHIBIT_SYN_CTX_SWITCH bit to
+ * zero in the CTX_CONTEXT_CONTROL register of LRCs that are submitted
+ * to an oversubscribed engine. This will cause those contexts to be
+ * switched out immediately if they hit an unsatisfied semaphore wait
+ * (instead of waiting the full timeslice duration). The bit is instead set
+ * to one if a single context is queued on the engine, to avoid it being
+ * switched out if there isn't another context that can run in its place.
+ */
+
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001
+#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u
+
+#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003
+#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
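
A hedged sketch of emitting the two opt-in feature KLVs defined above into a dword buffer. It assumes the standard GuC KLV header layout (key in bits 31:16 per the table above, length in bits 15:0); both KLVs carry no value dwords (LEN == 0), so each is a single header dword.

/* Sketch: append the opt-in feature KLVs; returns dwords written. */
static u32 emit_opt_in_klvs_sketch(u32 *klvs)
{
	u32 n = 0;

	klvs[n++] = GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY << 16 |
		    GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN;
	klvs[n++] = GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY << 16 |
		    GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN;

	return n;
}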
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
index a473aa6697d0..4fcd3bf6b76f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -6,37 +6,6 @@
#ifndef __INTEL_PCODE_H__
#define __INTEL_PCODE_H__
-#include "intel_uncore.h"
#include "xe_pcode.h"
-static inline int
-snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
-{
- return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val,
- slow_timeout_ms ?: 1);
-}
-
-static inline int
-snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
-{
-
- return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val);
-}
-
-static inline int
-snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
-{
- return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1);
-}
-
-static inline int
-skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- int timeout_base_ms)
-{
- return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply,
- timeout_base_ms);
-}
-
#endif /* __INTEL_PCODE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 0c1e88e36a1e..d012f02bc84f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -24,13 +24,6 @@ static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncor
return xe_root_tile_mmio(xe);
}
-static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
-{
- struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
-
- return xe_device_get_root_tile(xe);
-}
-
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
@@ -110,12 +103,13 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout)
+ u32 value, unsigned int timeout,
+ u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, NULL, false);
+ timeout * USEC_PER_MSEC, out_value, false);
}
static inline int
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h
new file mode 100644
index 000000000000..69e1935e9cdf
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_IOSF_SB_H_
+#define _VLV_IOSF_SB_H_
+
+#include <linux/types.h>
+
+#include "vlv_iosf_sb_reg.h"
+
+struct drm_device;
+
+enum vlv_iosf_sb_unit {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_DPIO_2,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+static inline void vlv_iosf_sb_get(struct drm_device *drm, unsigned long ports)
+{
+}
+static inline u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
+{
+ return 0;
+}
+static inline int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
+{
+ return 0;
+}
+static inline void vlv_iosf_sb_put(struct drm_device *drm, unsigned long ports)
+{
+}
+
+#endif /* _VLV_IOSF_SB_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h
index 949f134ce3cf..cb7fa8e794a6 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h
@@ -3,4 +3,4 @@
* Copyright © 2023 Intel Corporation
*/
-#include "../../i915/vlv_sideband_reg.h"
+#include "../../i915/vlv_iosf_sb_reg.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
deleted file mode 100644
index ec6f12de5727..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2013-2021 Intel Corporation
- */
-
-#ifndef _VLV_SIDEBAND_H_
-#define _VLV_SIDEBAND_H_
-
-#include <linux/types.h>
-
-#include "vlv_sideband_reg.h"
-
-enum pipe;
-struct drm_i915_private;
-
-enum {
- VLV_IOSF_SB_BUNIT,
- VLV_IOSF_SB_CCK,
- VLV_IOSF_SB_CCU,
- VLV_IOSF_SB_DPIO,
- VLV_IOSF_SB_FLISDSI,
- VLV_IOSF_SB_GPIO,
- VLV_IOSF_SB_NC,
- VLV_IOSF_SB_PUNIT,
-};
-
-static inline void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
-{
-}
-static inline u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- return 0;
-}
-static inline void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
-}
-static inline void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
-{
-}
-static inline void vlv_bunit_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_bunit_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_cck_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_cck_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_ccu_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_ccu_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_dpio_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_dpio_read(struct drm_i915_private *i915, int pipe, int reg)
-{
- return 0;
-}
-static inline void vlv_dpio_write(struct drm_i915_private *i915,
- int pipe, int reg, u32 val)
-{
-}
-static inline void vlv_dpio_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- return 0;
-}
-static inline void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
-}
-static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_nc_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- return 0;
-}
-static inline void vlv_nc_put(struct drm_i915_private *i915)
-{
-}
-static inline void vlv_punit_get(struct drm_i915_private *i915)
-{
-}
-static inline u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- return 0;
-}
-static inline int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return 0;
-}
-static inline void vlv_punit_put(struct drm_i915_private *i915)
-{
-}
-
-#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 27437c22bd70..910632f57c3d 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,7 +1,12 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
+#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
+#include <drm/drm_panic.h>
+
+#include "intel_fb.h"
+#include "intel_display_types.h"
#include "xe_bo.h"
#include "intel_bo.h"
@@ -59,3 +64,89 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
/* FIXME */
}
+
+struct xe_panic_data {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+struct xe_framebuffer {
+ struct intel_framebuffer base;
+ struct xe_panic_data panic;
+};
+
+static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
+{
+ return &container_of_const(fb, struct xe_framebuffer, base)->panic;
+}
+
+static void xe_panic_kunmap(struct xe_panic_data *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel, use
+ * ttm_bo_kmap_try_from_panic() to map the containing page and write the pixel.
+ * Try to keep the previous pixel's mapping, to avoid excessive map/unmap.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+ unsigned int new_page;
+ unsigned int offset;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ xe_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
+ panic->page);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+{
+ struct xe_framebuffer *xe_fb;
+
+ xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
+ if (xe_fb)
+ return &xe_fb->base;
+ return NULL;
+}
+
+int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+
+ panic->page = -1;
+ sb->set_pixel = xe_panic_page_set_pixel;
+ return 0;
+}
+
+void intel_bo_panic_finish(struct intel_framebuffer *fb)
+{
+ struct xe_panic_data *panic = to_xe_panic_data(fb);
+
+ xe_panic_kunmap(panic);
+ panic->page = -1;
+}
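
A hedged sketch of how the drm_panic core would consume the set_pixel hook installed by intel_bo_panic_setup() above. fill_rect_sketch() is an illustration, not a function from drm_panic.

static void fill_rect_sketch(struct drm_scanout_buffer *sb,
			     unsigned int x0, unsigned int y0,
			     unsigned int w, unsigned int h, u32 color)
{
	unsigned int x, y;

	/*
	 * Iterating row by row keeps consecutive pixels on the same page,
	 * so xe_panic_page_set_pixel() rarely has to remap.
	 */
	for (y = y0; y < y0 + h; y++)
		for (x = x0; x < x0 + w; x++)
			sb->set_pixel(sb, x, y, color);
}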
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index e8191562d122..fba9617a75a5 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -5,6 +5,7 @@
#include <drm/drm_fb_helper.h>
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbdev_fb.h"
@@ -65,7 +66,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
goto err;
}
- fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd);
+ fb = intel_framebuffer_create(&obj->ttm.base,
+ drm_get_format_info(dev,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
if (IS_ERR(fb)) {
xe_bo_unpin_map_no_vm(obj);
goto err;
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 9f4ade25787a..e2e0771cf274 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -20,6 +20,7 @@
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -38,7 +39,9 @@
static bool has_display(struct xe_device *xe)
{
- return HAS_DISPLAY(&xe->display);
+ struct intel_display *display = xe->display;
+
+ return HAS_DISPLAY(display);
}
/**
@@ -46,6 +49,8 @@ static bool has_display(struct xe_device *xe)
* early on
* @pdev: PCI device
*
+ * Note: This is called before xe or display device creation.
+ *
* Returns: true if probe needs to be deferred, false otherwise
*/
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
@@ -63,6 +68,8 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
* Set features and function hooks in @driver that are needed for driving the
* display IP. This sets the driver's capability of driving display, regardless
* if the device has it enabled
+ *
+ * Note: This is called before xe or display device creation.
*/
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
@@ -81,39 +88,10 @@ static void unset_display_features(struct xe_device *xe)
xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
-static void display_destroy(struct drm_device *dev, void *dummy)
-{
- struct xe_device *xe = to_xe_device(dev);
-
- destroy_workqueue(xe->display.hotplug.dp_wq);
-}
-
-/**
- * xe_display_create - create display struct
- * @xe: XE device instance
- *
- * Initialize all fields used by the display part.
- *
- * TODO: once everything can be inside a single struct, make the struct opaque
- * to the rest of xe and return it to be xe->display.
- *
- * Returns: 0 on success
- */
-int xe_display_create(struct xe_device *xe)
-{
- spin_lock_init(&xe->display.fb_tracking.lock);
-
- xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
- if (!xe->display.hotplug.dp_wq)
- return -ENOMEM;
-
- return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
-}
-
static void xe_display_fini_early(void *arg)
{
struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -126,7 +104,7 @@ static void xe_display_fini_early(void *arg)
int xe_display_init_early(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
int err;
if (!xe->info.probe_display)
@@ -144,7 +122,9 @@ int xe_display_init_early(struct xe_device *xe)
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
- intel_dram_detect(xe);
+ err = intel_dram_detect(xe);
+ if (err)
+ goto err_opregion;
intel_bw_init_hw(display);
@@ -170,7 +150,7 @@ err_opregion:
static void xe_display_fini(void *arg)
{
struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
@@ -180,7 +160,7 @@ static void xe_display_fini(void *arg)
int xe_display_init(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
int err;
if (!xe->info.probe_display)
@@ -195,7 +175,7 @@ int xe_display_init(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -206,7 +186,7 @@ void xe_display_register(struct xe_device *xe)
void xe_display_unregister(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -219,7 +199,7 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -230,7 +210,7 @@ void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -241,7 +221,7 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -251,7 +231,7 @@ void xe_display_irq_reset(struct xe_device *xe)
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -292,7 +272,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
static void xe_display_enable_d3cold(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -315,7 +295,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
static void xe_display_disable_d3cold(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -339,7 +319,7 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
void xe_display_pm_suspend(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
@@ -364,7 +344,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
- intel_encoder_suspend_all(&xe->display);
+ intel_encoder_suspend_all(display);
}
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
@@ -374,7 +354,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
void xe_display_pm_shutdown(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -405,7 +385,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -420,7 +400,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
void xe_display_pm_suspend_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
@@ -431,7 +411,7 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -449,7 +429,7 @@ void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -464,7 +444,7 @@ void xe_display_pm_shutdown_late(struct xe_device *xe)
void xe_display_pm_resume_early(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -474,7 +454,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
void xe_display_pm_resume(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -509,7 +489,7 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
- struct intel_display *display = &xe->display;
+ struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
@@ -532,6 +512,17 @@ static void display_device_remove(struct drm_device *dev, void *arg)
intel_display_device_remove(display);
}
+/**
+ * xe_display_probe - probe display and create display struct
+ * @xe: XE device instance
+ *
+ * Initialize all fields used by the display part.
+ *
+ * TODO: once everything can be inside a single struct, make the struct opaque
+ * to the rest of xe and return it to be xe->display.
+ *
+ * Returns: 0 on success
+ */
int xe_display_probe(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -542,11 +533,15 @@ int xe_display_probe(struct xe_device *xe)
goto no_display;
display = intel_display_device_probe(pdev);
+ if (IS_ERR(display))
+ return PTR_ERR(display);
err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
+ xe->display = display;
+
if (has_display(xe))
return 0;
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 46e14f8dee28..e533aa4750bc 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -15,8 +15,6 @@ struct drm_driver;
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
-int xe_display_create(struct xe_device *xe);
-
int xe_display_probe(struct xe_device *xe);
int xe_display_init_early(struct xe_device *xe);
@@ -46,8 +44,6 @@ static inline int xe_display_driver_probe_defer(struct pci_dev *pdev) { return 0
static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { }
static inline void xe_display_driver_remove(struct xe_device *xe) {}
-static inline int xe_display_create(struct xe_device *xe) { return 0; }
-
static inline int xe_display_probe(struct xe_device *xe) { return 0; }
static inline int xe_display_init_early(struct xe_device *xe) { return 0; }
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
index 1955153aadba..3825376e98cc 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rpm.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include "intel_display_core.h"
#include "intel_display_rpm.h"
+#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pm.h"
static struct xe_device *display_to_xe(struct intel_display *display)
{
- return container_of(display, struct xe_device, display);
+ return to_xe_device(display->drm);
}
struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 2933ca97d673..68d1387d81a0 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -3,8 +3,8 @@
* Copyright © 2024 Intel Corporation
*/
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-
#include "xe_device.h"
#include "xe_wa.h"
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 55259969480b..c38fba18effe 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -6,6 +6,7 @@
#include <drm/ttm/ttm_bo.h>
#include "i915_vma.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -23,6 +24,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
* by writing dpt/ggtt in a different order?
@@ -32,10 +34,9 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(map, *dpt_ofs, u64, pte);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx -= src_stride;
}
@@ -55,17 +56,15 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
{
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
- = ggtt->pt_ops->pte_encode_bo;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
for (row = 0; row < height; row++) {
u32 src_idx = src_stride * row + bo_ofs;
for (column = 0; column < width; column++) {
- iosys_map_wr(map, *dpt_ofs, u64,
- pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]));
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx++;
@@ -129,13 +128,13 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
return PTR_ERR(dpt);
if (view->type == I915_GTT_VIEW_NORMAL) {
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
u32 x;
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
+ iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr);
}
} else if (view->type == I915_GTT_VIEW_REMAPPED) {
const struct intel_remapped_info *remap_info = &view->remapped;
@@ -176,15 +175,15 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
{
struct xe_device *xe = xe_bo_device(bo);
u32 column, row;
+ u64 pte = ggtt->pt_ops->pte_encode_flags(bo, xe->pat.idx[XE_CACHE_NONE]);
for (column = 0; column < width; column++) {
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte | addr);
*ggtt_ofs += XE_PAGE_SIZE;
src_idx -= src_stride;
}
@@ -202,14 +201,15 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(fb->base.dev);
- struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ struct xe_tile *tile0 = xe_device_get_root_tile(xe);
+ struct xe_ggtt *ggtt = tile0->mem.ggtt;
u32 align;
int ret;
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(xe);
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
@@ -218,29 +218,22 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node[ggtt->tile->id];
+ if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[tile0->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
- u32 x, size = bo->ttm.base.size;
-
vma->node = xe_ggtt_node_init(ggtt);
if (IS_ERR(vma->node)) {
ret = PTR_ERR(vma->node);
goto out_unlock;
}
- ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0);
if (ret) {
xe_ggtt_node_fini(vma->node);
goto out_unlock;
}
- for (x = 0; x < size; x += XE_PAGE_SIZE) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
- xe->pat.idx[XE_CACHE_NONE]);
-
- ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
- }
+ xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
} else {
u32 i, ggtt_ofs;
const struct intel_rotation_info *rot_info = &view->rotated;
@@ -274,7 +267,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_pm_runtime_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -349,7 +342,7 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
- u8 tile_id = vma->node->ggtt->tile->id;
+ u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id;
if (!refcount_dec_and_test(&vma->ref))
return;
@@ -390,6 +383,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
{
struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct intel_display *display = xe->display;
struct i915_vma *vma;
if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
@@ -400,8 +394,8 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
goto found;
}
- if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
- vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+ if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
+ vma = intel_fbdev_vma_pointer(display->fbdev.fbdev);
if (vma)
goto found;
}
@@ -464,3 +458,8 @@ u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
return 0;
}
+
+void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
+{
+ *map = vma->bo->vmap;
+}
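
The change above repeats one pattern: the PTE flag bits depend only on the bo and the PAT index, so they are hoisted out of the loop and only the per-page address varies. A hedged sketch of that pattern, using the same helpers the diff uses (the function itself and the map offset handling are illustrative):

static void write_ptes_sketch(struct xe_ggtt *ggtt, struct xe_bo *bo,
			      struct iosys_map *map, u32 ofs, u32 npages,
			      u16 pat_index)
{
	u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, pat_index);	/* loop invariant */
	u32 i;

	for (i = 0; i < npages; i++) {
		u64 addr = xe_bo_addr(bo, (u64)i * XE_PAGE_SIZE, XE_PAGE_SIZE);

		iosys_map_wr(map, ofs + i * 8, u64, pte | addr);	/* flags | address */
	}
}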
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index b35a6f201d4a..30f1073141fc 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -85,7 +85,7 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
cmd_in = xe_bo_ggtt_addr(bo);
cmd_out = cmd_in + PAGE_SIZE;
- xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));
gsc_context->hdcp_bo = bo;
gsc_context->hdcp_cmd_in = cmd_in;
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 6502b8274173..dcbc4b2d3fd9 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,14 +10,15 @@
#include "xe_ggtt.h"
#include "xe_mmio.h"
-#include "i915_reg.h"
-#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
+#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
+#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
#include "xe_wa.h"
@@ -87,12 +88,8 @@ initial_plane_bo(struct xe_device *xe,
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
- u64 __iomem *gte = tile0->mem.ggtt->gsm;
- u64 pte;
+ u64 pte = xe_ggtt_read_pte(tile0->mem.ggtt, base);
- gte += base / XE_PAGE_SIZE;
-
- pte = ioread64(gte);
if (!(pte & XE_GGTT_PTE_DM)) {
drm_err(&xe->drm,
"Initial plane programming missing DM bit\n");
@@ -187,7 +184,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- &bo->ttm.base, &mode_cmd)) {
+ &bo->ttm.base, fb->format, &mode_cmd)) {
drm_dbg_kms(&xe->drm, "intel fb init failed\n");
goto err_bo;
}
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2a7fccbeb1d5..78bda4c47874 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -3,9 +3,9 @@
* Copyright © 2024 Intel Corporation
*/
-#include "xe_device.h"
-#include "intel_display_types.h"
+#include "intel_display_core.h"
#include "intel_tdf.h"
+#include "xe_device.h"
void intel_td_flush(struct intel_display *display)
{
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 7702364b65f1..9b66cc972a63 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -16,6 +16,10 @@
#define MTL_GSC_HECI1_BASE 0x00116000
#define MTL_GSC_HECI2_BASE 0x00117000
+#define DG1_GSC_HECI2_BASE 0x00259000
+#define PVC_GSC_HECI2_BASE 0x00285000
+#define DG2_GSC_HECI2_BASE 0x00374000
+
#define HECI_H_CSR(base) XE_REG((base) + 0x4)
#define HECI_H_CSR_IE REG_BIT(0)
#define HECI_H_CSR_IS REG_BIT(1)
diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
new file mode 100644
index 000000000000..af781c8e4a80
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _XE_I2C_REGS_H_
+#define _XE_I2C_REGS_H_
+
+#include <linux/pci_regs.h>
+
+#include "xe_reg_defs.h"
+#include "xe_regs.h"
+
+#define I2C_BRIDGE_OFFSET (SOC_BASE + 0xd9000)
+#define I2C_CONFIG_SPACE_OFFSET (SOC_BASE + 0xf6000)
+#define I2C_MEM_SPACE_OFFSET (SOC_BASE + 0xf7400)
+
+#define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164)
+#define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168)
+
+#define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND)
+#define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84)
+
+#endif /* _XE_I2C_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index f0ecfcac4003..13635e4331d4 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -19,6 +19,7 @@
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
#define DISPLAY_IRQ REG_BIT(16)
+#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
/*
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 994af591a2e8..1b101edb838b 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -12,9 +12,13 @@
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_PER_CTX_PTR (0x12 + 1)
+#define CTX_CS_INDIRECT_CTX (0x14 + 1)
+#define CTX_CS_INDIRECT_CTX_OFFSET (0x16 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
+#define CTX_ACC_CTR_THOLD (0x2a + 1)
+#define CTX_ASID (0x2e + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
@@ -36,4 +40,7 @@
#define INDIRECT_CTX_RING_START_UDW (0x08 + 1)
#define INDIRECT_CTX_RING_CTL (0x0a + 1)
+#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6)
+#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a79ad2da070c..e693a50706f8 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -97,4 +97,7 @@
#define OAM_STATUS(base) XE_REG((base) + OAM_STATUS_OFFSET)
#define OAM_MMIO_TRG(base) XE_REG((base) + OAM_MMIO_TRG_OFFSET)
+#define OAM_COMPRESSION_T3_CONTROL XE_REG(0x1c2e00)
+#define OAM_LAT_MEASURE_ENABLE REG_BIT(4)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index c556a04670ee..fb097607b86c 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -18,12 +18,10 @@
#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
#define BMG_FAN_1_SPEED XE_REG(0x138140)
#define BMG_FAN_2_SPEED XE_REG(0x138170)
#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
-#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index f45abcd96ba8..2995d72c3f78 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -5,11 +5,16 @@
#ifndef _XE_PMT_H_
#define _XE_PMT_H_
-#define SOC_BASE 0x280000
+#include "xe_regs.h"
#define BMG_PMT_BASE_OFFSET 0xDB000
#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+#define PUNIT_TELEMETRY_GUID XE_REG(BMG_DISCOVERY_OFFSET + 0x4)
+#define BMG_ENERGY_STATUS_PMT_OFFSET (0x30)
+#define ENERGY_PKG REG_GENMASK64(31, 0)
+#define ENERGY_CARD REG_GENMASK64(63, 32)
+
#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
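
A hedged sketch of splitting a 64-bit energy-status telemetry value into the package and card accumulators using the ENERGY_PKG/ENERGY_CARD masks above; how the raw qword is read from the PMT region is out of scope here.

#include <linux/bitfield.h>

static void decode_energy_sketch(u64 raw, u32 *pkg, u32 *card)
{
	*pkg = FIELD_GET(ENERGY_PKG, raw);	/* bits 31:0 */
	*card = FIELD_GET(ENERGY_CARD, raw);	/* bits 63:32 */
}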
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 3abb17d2ca33..1926b4044314 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -7,6 +7,8 @@
#include "regs/xe_reg_defs.h"
+#define SOC_BASE 0x280000
+
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 378dcd0fb414..bb469096d072 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -106,7 +106,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Check last CCS value, or at least last value in page. */
- offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size);
+ offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo));
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
if (cpu_map[offset] != get_val) {
KUNIT_FAIL(test,
@@ -514,9 +514,9 @@ static int shrink_test_run_device(struct xe_device *xe)
* other way around, they may not be subject to swapping...
*/
if (alloced < purgeable) {
- xe_ttm_tt_account_subtract(&xe_tt->ttm);
+ xe_ttm_tt_account_subtract(xe, &xe_tt->ttm);
xe_tt->purgeable = true;
- xe_ttm_tt_account_add(&xe_tt->ttm);
+ xe_ttm_tt_account_add(xe, &xe_tt->ttm);
bo->ttm.priority = 0;
spin_lock(&bo->ttm.bdev->lru_lock);
ttm_bo_move_to_lru_tail(&bo->ttm);
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
deleted file mode 100644
index b683585db852..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 AND MIT
-/*
- * Copyright © 2024 Intel Corporation
- */
-
-#include <kunit/test.h>
-
-#include "xe_device.h"
-#include "xe_kunit_helpers.h"
-#include "xe_pci_test.h"
-
-static int pf_service_test_init(struct kunit *test)
-{
- struct xe_pci_fake_data fake = {
- .sriov_mode = XE_SRIOV_MODE_PF,
- .platform = XE_TIGERLAKE, /* some random platform */
- .subplatform = XE_SUBPLATFORM_NONE,
- };
- struct xe_device *xe;
- struct xe_gt *gt;
-
- test->priv = &fake;
- xe_kunit_helper_xe_device_test_init(test);
-
- xe = test->priv;
- KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
-
- gt = xe_device_get_gt(xe, 0);
- pf_init_versions(gt);
-
- /*
- * sanity check:
- * - all supported platforms VF/PF ABI versions must be defined
- * - base version can't be newer than latest
- */
- KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.latest.major);
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor,
- gt->sriov.pf.service.version.latest.minor);
-
- test->priv = gt;
- return 0;
-}
-
-static void pf_negotiate_any(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt, VF2PF_HANDSHAKE_MAJOR_ANY,
- VF2PF_HANDSHAKE_MINOR_ANY,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_base_match(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.base.minor);
-}
-
-static void pf_negotiate_base_newer(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_GE(test, minor, gt->sriov.pf.service.version.base.minor);
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
- else
- KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
-}
-
-static void pf_negotiate_base_next(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major + 1, 0,
- &major, &minor));
- KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
- KUNIT_ASSERT_LE(test, major, gt->sriov.pf.service.version.latest.major);
- if (major == gt->sriov.pf.service.version.latest.major)
- KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
- else
- KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
-}
-
-static void pf_negotiate_base_older(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (!gt->sriov.pf.service.version.base.minor)
- kunit_skip(test, "no older minor\n");
-
- KUNIT_ASSERT_NE(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major,
- gt->sriov.pf.service.version.base.minor - 1,
- &major, &minor));
-}
-
-static void pf_negotiate_base_prev(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_NE(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.base.major - 1, 1,
- &major, &minor));
-}
-
-static void pf_negotiate_latest_match(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_newer(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_next(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major + 1, 0,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
-}
-
-static void pf_negotiate_latest_older(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (!gt->sriov.pf.service.version.latest.minor)
- kunit_skip(test, "no older minor\n");
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major,
- gt->sriov.pf.service.version.latest.minor - 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
- KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor - 1);
-}
-
-static void pf_negotiate_latest_prev(struct kunit *test)
-{
- struct xe_gt *gt = test->priv;
- u32 major, minor;
-
- if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
- kunit_skip(test, "no prev major");
-
- KUNIT_ASSERT_EQ(test, 0,
- pf_negotiate_version(gt,
- gt->sriov.pf.service.version.latest.major - 1,
- gt->sriov.pf.service.version.base.minor + 1,
- &major, &minor));
- KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major - 1);
- KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
-}
-
-static struct kunit_case pf_service_test_cases[] = {
- KUNIT_CASE(pf_negotiate_any),
- KUNIT_CASE(pf_negotiate_base_match),
- KUNIT_CASE(pf_negotiate_base_newer),
- KUNIT_CASE(pf_negotiate_base_next),
- KUNIT_CASE(pf_negotiate_base_older),
- KUNIT_CASE(pf_negotiate_base_prev),
- KUNIT_CASE(pf_negotiate_latest_match),
- KUNIT_CASE(pf_negotiate_latest_newer),
- KUNIT_CASE(pf_negotiate_latest_next),
- KUNIT_CASE(pf_negotiate_latest_older),
- KUNIT_CASE(pf_negotiate_latest_prev),
- {}
-};
-
-static struct kunit_suite pf_service_suite = {
- .name = "pf_service",
- .test_cases = pf_service_test_cases,
- .init = pf_service_test_init,
-};
-
-kunit_test_suite(pf_service_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
index 6faffcd74869..d266882adc0e 100644
--- a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -32,7 +32,7 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
bo->tile = tile;
bo->ttm.bdev = &xe->ttm;
- bo->size = size;
+ bo->ttm.base.size = size;
iosys_map_set_vaddr(&bo->vmap, buf);
if (flags & XE_BO_FLAG_GGTT) {
@@ -42,10 +42,8 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
KUNIT_ASSERT_EQ(test, 0,
- drm_mm_insert_node_in_range(&ggtt->mm,
- &bo->ggtt_node[tile->id]->base,
- bo->size, SZ_4K,
- 0, 0, U64_MAX, 0));
+ xe_ggtt_node_insert(bo->ggtt_node[tile->id],
+ xe_bo_size(bo), SZ_4K));
}
return bo;
@@ -67,8 +65,9 @@ static int guc_buf_test_init(struct kunit *test)
ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
- drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
- mutex_init(&ggtt->lock);
+ KUNIT_ASSERT_EQ(test, 0,
+ xe_ggtt_init_kunit(ggtt, DUT_GGTT_START,
+ DUT_GGTT_START + DUT_GGTT_SIZE));
kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
replacement_xe_managed_bo_create_pin_map);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 4a65e3103f77..edd1e701aa1c 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -74,13 +74,13 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
- bool big = bo->size >= SZ_2M;
+ bool big = xe_bo_size(bo) >= SZ_2M;
struct dma_fence *fence;
const char *str = big ? "Copying big bo" : "Copying small bo";
int err;
struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
- bo->size,
+ xe_bo_size(bo),
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
@@ -105,7 +105,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
goto out_unlock;
}
- xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
fence = xe_migrate_clear(m, remote, remote->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
@@ -113,15 +113,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
check(retval, expected, "remote first offset should be cleared",
test);
- retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
+ retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64);
check(retval, expected, "remote last offset should be cleared",
test);
}
dma_fence_put(fence);
/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
- xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
- xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote));
+ xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo));
expected = 0xc0c0c0c0c0c0c0c0;
fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
@@ -131,15 +131,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &bo->vmap, 0, u64);
check(retval, expected,
"remote -> vram bo first offset should be copied", test);
- retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
+ retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64);
check(retval, expected,
"remote -> vram bo offset should be copied", test);
}
dma_fence_put(fence);
/* And other way around.. slightly hacky.. */
- xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
- xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
+ xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo));
fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
remote->ttm.resource, false);
@@ -148,7 +148,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
check(retval, expected,
"vram -> remote bo first offset should be copied", test);
- retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64);
+ retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(bo) - 8, u64);
check(retval, expected,
"vram -> remote bo last offset should be copied", test);
}
@@ -245,9 +245,9 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
if (m->q->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
- xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
+ xe_res_first(pt->ttm.resource, 0, xe_bo_size(pt), &src_it);
else
- xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
+ xe_res_first_sg(xe_bo_sg(pt), 0, xe_bo_size(pt), &src_it);
emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
&src_it, XE_PAGE_SIZE, pt->ttm.resource);
@@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
/* Clear a small bo */
kunit_info(test, "Clearing small buffer object\n");
- xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
+ xe_map_memset(xe, &tiny->vmap, 0, 0x22, xe_bo_size(tiny));
expected = 0;
fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
@@ -286,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
dma_fence_put(fence);
retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
check(retval, expected, "Command clear small first value", test);
- retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
+ retval = xe_map_rd(xe, &tiny->vmap, xe_bo_size(tiny) - 4, u32);
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
@@ -298,7 +298,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
/* Clear a big bo */
kunit_info(test, "Clearing big buffer object\n");
- xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
+ xe_map_memset(xe, &big->vmap, 0, 0x11, xe_bo_size(big));
expected = 0;
fence = xe_migrate_clear(m, big, big->ttm.resource,
XE_MIGRATE_CLEAR_FLAG_FULL);
@@ -308,7 +308,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
dma_fence_put(fence);
retval = xe_map_rd(xe, &big->vmap, 0, u32);
check(retval, expected, "Command clear big first value", test);
- retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
+ retval = xe_map_rd(xe, &big->vmap, xe_bo_size(big) - 4, u32);
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
@@ -370,7 +370,7 @@ static struct dma_fence *blt_copy(struct xe_tile *tile,
struct xe_migrate *m = tile->migrate;
struct xe_device *xe = gt_to_xe(gt);
struct dma_fence *fence = NULL;
- u64 size = src_bo->size;
+ u64 size = xe_bo_size(src_bo);
struct xe_res_cursor src_it, dst_it;
struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
u64 src_L0_ofs, dst_L0_ofs;
@@ -498,7 +498,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
long ret;
expected = 0xd0d0d0d0d0d0d0d0;
- xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));
fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
@@ -523,7 +523,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
check(retval, expected, "Clear evicted vram data first value", test);
- retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
check(retval, expected, "Clear evicted vram data last value", test);
fence = blt_copy(tile, vram_bo, ccs_bo,
@@ -532,7 +532,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
check(retval, 0, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
check(retval, 0, "Clear ccs data last value", test);
}
dma_fence_put(fence);
@@ -562,7 +562,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
check(retval, expected, "Restored value must be equal to initial value", test);
- retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
check(retval, expected, "Restored value must be equal to initial value", test);
fence = blt_copy(tile, vram_bo, ccs_bo,
@@ -570,7 +570,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
check(retval, 0, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
check(retval, 0, "Clear ccs data last value", test);
}
dma_fence_put(fence);
@@ -583,7 +583,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
u64 expected, retval;
expected = 0xd0d0d0d0d0d0d0d0;
- xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));
fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
@@ -597,7 +597,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Decompressed value must be equal to initial value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Decompressed value must be equal to initial value", test);
}
dma_fence_put(fence);
@@ -615,7 +615,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Clear main buffer first value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Clear main buffer last value", test);
}
dma_fence_put(fence);
@@ -625,7 +625,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
check(retval, expected, "Clear ccs data first value", test);
- retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
check(retval, expected, "Clear ccs data last value", test);
}
dma_fence_put(fence);
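For reference, each of the sanity checks above repeats one pattern: fill the BO through its CPU vmap, then read back the first and the last element. A minimal sketch of that pattern, assuming only the xe_map helpers and xe_bo_size() used above (check_fill_u32() is a hypothetical name, not part of the patch):

static void check_fill_u32(struct xe_device *xe, struct xe_bo *bo,
			   u8 byte, struct kunit *test)
{
	u32 expected = byte * 0x01010101u;	/* memset replicates the byte */
	u32 first, last;

	xe_map_memset(xe, &bo->vmap, 0, byte, xe_bo_size(bo));
	first = xe_map_rd(xe, &bo->vmap, 0, u32);
	last = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - sizeof(u32), u32);
	KUNIT_EXPECT_EQ(test, first, expected);
	KUNIT_EXPECT_EQ(test, last, expected);
}

The last-element offset is why the hunks above subtract 4 for u32 reads and 8 for u64 reads from xe_bo_size().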
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 1d3e2e50c355..9c715e59f030 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,49 +12,79 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
+ param->verx100 / 100, param->verx100 % 100, param->name);
+}
+
+KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
+
+static void xe_pci_id_kunit_desc(const struct pci_device_id *param, char *desc)
+{
+ const struct xe_device_desc *dev_desc =
+ (const struct xe_device_desc *)param->driver_data;
+
+ if (dev_desc)
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "0x%X (%s)",
+ param->device, dev_desc->platform_name);
+}
+
+KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
+
/**
- * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
- * This function iterates over the descriptors for all graphics IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_graphics_desc *desc, *last = NULL;
-
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- desc = graphics_ips[i].desc;
- if (desc == last)
- continue;
-
- xe_fn(desc);
- last = desc;
- }
+ return graphics_ip_gen_params(prev, desc);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
/**
- * xe_call_for_each_media_ip - Iterate over all recognized media IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
*
- * This function iterates over the descriptors for all media IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_media_ip(xe_media_fn xe_fn)
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_media_desc *desc, *last = NULL;
+ return media_ip_gen_params(prev, desc);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- desc = media_ips[i].desc;
- if (desc == last)
- continue;
+/**
+ * xe_pci_id_gen_param - Generate struct pci_device_id parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct pci_device_id parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
+ */
+const void *xe_pci_id_gen_param(const void *prev, char *desc)
+{
+ const struct pci_device_id *pci = pci_id_gen_params(prev, desc);
- xe_fn(desc);
- last = desc;
- }
+ return pci->driver_data ? pci : NULL;
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param);
static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
u32 *ver, u32 *revid)
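The kernel-doc above describes the standard KUnit parameter-generator shape. A self-contained sketch of how KUNIT_ARRAY_PARAM pairs with KUNIT_CASE_PARAM (my_value/my_values/my_case are hypothetical names):

#include <kunit/test.h>

static const int my_values[] = { 1, 2, 3 };

static void my_value_desc(const int *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value %d", *param);
}

/* Generates my_value_gen_params(), matching the wrappers exported above. */
KUNIT_ARRAY_PARAM(my_value, my_values, my_value_desc);

static void my_case(struct kunit *test)
{
	const int *param = test->param_value;	/* current parameter */

	KUNIT_EXPECT_GT(test, *param, 0);
}

static struct kunit_case my_cases[] = {
	KUNIT_CASE_PARAM(my_case, my_value_gen_params),
	{}
};

The xe wrappers above exist because the generated *_gen_params() functions are static to xe_pci.c and the tests need an exported entry point.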
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 744a37583d2d..37b344df2dc3 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -14,9 +14,10 @@
#include "xe_pci_test.h"
#include "xe_pci_types.h"
-static void check_graphics_ip(const struct xe_graphics_desc *graphics)
+static void check_graphics_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_graphics_desc *graphics = param->desc;
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -28,9 +29,10 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_media_ip(const struct xe_media_desc *media)
+static void check_media_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_media_desc *media = param->desc;
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
@@ -42,19 +44,21 @@ static void check_media_ip(const struct xe_media_desc *media)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void xe_gmdid_graphics_ip(struct kunit *test)
+static void check_platform_gt_count(struct kunit *test)
{
- xe_call_for_each_graphics_ip(check_graphics_ip);
-}
+ const struct pci_device_id *pci = test->param_value;
+ const struct xe_device_desc *desc =
+ (const struct xe_device_desc *)pci->driver_data;
+ int max_gt = desc->max_gt_per_tile;
-static void xe_gmdid_media_ip(struct kunit *test)
-{
- xe_call_for_each_media_ip(check_media_ip);
+ KUNIT_ASSERT_GT(test, max_gt, 0);
+ KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE);
}
static struct kunit_case xe_pci_tests[] = {
- KUNIT_CASE(xe_gmdid_graphics_ip),
- KUNIT_CASE(xe_gmdid_media_ip),
+ KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
+ KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
+ KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ede46800aff1..ce4d2b86b778 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -12,15 +12,6 @@
#include "xe_sriov_types.h"
struct xe_device;
-struct xe_graphics_desc;
-struct xe_media_desc;
-
-typedef int (*xe_device_fn)(struct xe_device *);
-typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
-typedef void (*xe_media_fn)(const struct xe_media_desc *);
-
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
-void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
@@ -34,6 +25,9 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_id_gen_param(const void *prev, char *desc);
const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c
new file mode 100644
index 000000000000..ba95e29b597d
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+static int pf_service_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ xe_sriov_pf_service_init(xe);
+ /*
+ * sanity check:
+ * - VF/PF ABI versions must be defined for all supported platforms
+ * - base version can't be newer than latest
+ */
+ KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.latest.major);
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor,
+ xe->sriov.pf.service.version.latest.minor);
+ return 0;
+}
+
+static void pf_negotiate_any(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe, VF2PF_HANDSHAKE_MAJOR_ANY,
+ VF2PF_HANDSHAKE_MINOR_ANY,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_base_match(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.base.minor);
+}
+
+static void pf_negotiate_base_newer(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_GE(test, minor, xe->sriov.pf.service.version.base.minor);
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_next(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_LE(test, major, xe->sriov.pf.service.version.latest.major);
+ if (major == xe->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_older(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (!xe->sriov.pf.service.version.base.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major,
+ xe->sriov.pf.service.version.base.minor - 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_base_prev(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.base.major - 1, 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_latest_match(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_newer(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_next(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_older(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (!xe->sriov.pf.service.version.latest.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major,
+ xe->sriov.pf.service.version.latest.minor - 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor - 1);
+}
+
+static void pf_negotiate_latest_prev(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ u32 major, minor;
+
+ if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)
+ kunit_skip(test, "no prev major");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(xe,
+ xe->sriov.pf.service.version.latest.major - 1,
+ xe->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major - 1);
+ KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major);
+}
+
+static struct kunit_case pf_service_test_cases[] = {
+ KUNIT_CASE(pf_negotiate_any),
+ KUNIT_CASE(pf_negotiate_base_match),
+ KUNIT_CASE(pf_negotiate_base_newer),
+ KUNIT_CASE(pf_negotiate_base_next),
+ KUNIT_CASE(pf_negotiate_base_older),
+ KUNIT_CASE(pf_negotiate_base_prev),
+ KUNIT_CASE(pf_negotiate_latest_match),
+ KUNIT_CASE(pf_negotiate_latest_newer),
+ KUNIT_CASE(pf_negotiate_latest_next),
+ KUNIT_CASE(pf_negotiate_latest_older),
+ KUNIT_CASE(pf_negotiate_latest_prev),
+ {}
+};
+
+static struct kunit_suite pf_service_suite = {
+ .name = "pf_service",
+ .test_cases = pf_service_test_cases,
+ .init = pf_service_test_init,
+};
+
+kunit_test_suite(pf_service_suite);
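Taken together, the cases above pin down the negotiation contract. A rough reference model of the rules they encode, for the simple case where base and latest share the same major version (sketch_negotiate() is illustrative, not the driver's implementation; the VF2PF_HANDSHAKE_*_ANY macros come from the GuC ABI headers the tests use, and the errno is an assumption):

static int sketch_negotiate(u32 base_major, u32 base_minor,
			    u32 latest_major, u32 latest_minor,
			    u32 wanted_major, u32 wanted_minor,
			    u32 *major, u32 *minor)
{
	/* wildcard, or anything newer than latest, clamps to latest */
	if ((wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
	     wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) ||
	    wanted_major > latest_major ||
	    (wanted_major == latest_major && wanted_minor >= latest_minor)) {
		*major = latest_major;
		*minor = latest_minor;
		return 0;
	}

	/* anything older than base is rejected */
	if (wanted_major < base_major ||
	    (wanted_major == base_major && wanted_minor < base_minor))
		return -EPERM;

	/* any version within [base, latest] is granted as requested */
	*major = wanted_major;
	*minor = wanted_minor;
	return 0;
}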
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 9570672fce33..5ce0e26822f2 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -19,7 +19,7 @@ static int bb_prefetch(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
+ if (GRAPHICS_VERx100(xe) >= 1250 && xe_gt_is_main_type(gt))
/*
* RCS and CCS require 1K, although other engines would be
* okay with 512.
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index fafacd73dcc3..b5cc65506696 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -14,7 +14,7 @@ struct xe_gt;
struct xe_exec_queue;
struct xe_sched_job;
-struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
+struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 7aa2c17825da..18f27da47a36 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -19,6 +19,8 @@
#include <kunit/static_stub.h>
+#include <trace/events/gpu_mem.h>
+
#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
@@ -336,15 +338,13 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- /** @xe - The xe device */
- struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
bool purgeable;
};
-static int xe_tt_map_sg(struct ttm_tt *tt)
+static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
unsigned long num_pages = tt->num_pages;
@@ -359,13 +359,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->xe->drm.dev),
+ xe_sg_segment_size(xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -376,12 +376,12 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
return 0;
}
-static void xe_tt_unmap_sg(struct ttm_tt *tt)
+static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -400,24 +400,37 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
* Account ttm pages against the device shrinker's shrinkable and
* purgeable counts.
*/
-static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
}
-static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
+static void update_global_total_pages(struct ttm_device *ttm_dev,
+ long num_pages)
+{
+#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+ u64 global_total_pages =
+ atomic64_add_return(num_pages, &xe->global_total_pages);
+
+ trace_gpu_mem_total(xe->drm.primary->index, 0,
+ global_total_pages << PAGE_SHIFT);
+#endif
}
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
@@ -436,11 +449,10 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
return NULL;
tt = &xe_tt->ttm;
- xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
- extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
+ extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
PAGE_SIZE);
/*
@@ -527,21 +539,25 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
return err;
xe_tt->purgeable = false;
- xe_ttm_tt_account_add(tt);
+ xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
+ update_global_total_pages(ttm_dev, tt->num_pages);
return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+
if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
- xe_tt_unmap_sg(tt);
+ xe_tt_unmap_sg(xe, tt);
ttm_pool_free(&ttm_dev->pool, tt);
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
+ update_global_total_pages(ttm_dev, -(long)tt->num_pages);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -789,7 +805,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Bo creation path, moving to system or TT. */
if ((!old_mem && ttm) && !handle_system_ccs) {
if (new_mem->mem_type == XE_PL_TT)
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (!ret)
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
@@ -812,7 +828,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
(!ttm && ttm_bo->type == ttm_bo_type_device);
if (new_mem->mem_type == XE_PL_TT) {
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (ret)
goto out;
}
@@ -958,7 +974,7 @@ out:
if (timeout < 0)
ret = timeout;
- xe_tt_unmap_sg(ttm_bo->ttm);
+ xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
return ret;
@@ -968,6 +984,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
unsigned long *scanned)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
long lret;
/* Fake move to system, without copying data. */
@@ -982,7 +999,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
if (lret)
return lret;
- xe_tt_unmap_sg(bo->ttm);
+ xe_tt_unmap_sg(xe, bo->ttm);
ttm_bo_move_null(bo, new_resource);
}
@@ -993,7 +1010,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
.allow_move = false});
if (lret > 0)
- xe_ttm_tt_account_subtract(bo->ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm);
return lret;
}
@@ -1043,7 +1060,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
- struct xe_device *xe = xe_tt->xe;
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
bool needs_rpm;
long lret = 0L;
@@ -1080,7 +1097,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
xe_pm_runtime_put(xe);
if (lret > 0)
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
out_unref:
xe_bo_put(xe_bo);
@@ -1122,7 +1139,7 @@ int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
goto out_unlock_bo;
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
@@ -1200,7 +1217,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
goto out_unlock_bo;
if (!backup) {
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
+ NULL, xe_bo_size(bo),
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
XE_BO_FLAG_PINNED);
@@ -1254,7 +1272,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
}
xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
- bo->size);
+ xe_bo_size(bo));
}
if (!bo->backup_obj)
@@ -1347,7 +1365,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
}
xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
- bo->size);
+ xe_bo_size(bo));
}
bo->backup_obj = NULL;
@@ -1381,7 +1399,8 @@ int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
ttm_bo->sg = NULL;
xe_tt->sg = NULL;
} else if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
+ xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -1557,7 +1576,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
- bo->size - (offset & PAGE_MASK), &cursor);
+ xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
do {
unsigned long page_offset = (offset & ~PAGE_MASK);
@@ -1857,7 +1876,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->ccs_cleared = false;
bo->tile = tile;
- bo->size = size;
bo->flags = flags;
bo->cpu_caching = cpu_caching;
bo->ttm.base.funcs = &xe_gem_object_funcs;
@@ -2035,7 +2053,7 @@ __xe_bo_create_locked(struct xe_device *xe,
if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
- start + bo->size, U64_MAX);
+ start + xe_bo_size(bo), U64_MAX);
} else {
err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
}
@@ -2156,21 +2174,6 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}
-struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
- const void *data, size_t size,
- enum ttm_bo_type type, u32 flags)
-{
- struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- type, flags);
- if (IS_ERR(bo))
- return bo;
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
-
- return bo;
-}
-
static void __xe_bo_unpin_map_no_vm(void *arg)
{
xe_bo_unpin_map_no_vm(arg);
@@ -2233,7 +2236,7 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
xe_assert(xe, !(*src)->vmap.is_iomem);
bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
- (*src)->size, dst_flags);
+ xe_bo_size(*src), dst_flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -2293,7 +2296,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2341,7 +2344,7 @@ int xe_bo_pin(struct xe_bo *bo)
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2377,7 +2380,7 @@ void xe_bo_unpin_external(struct xe_bo *bo)
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2409,7 +2412,7 @@ void xe_bo_unpin(struct xe_bo *bo)
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
}
/**
@@ -2523,7 +2526,7 @@ int xe_bo_vmap(struct xe_bo *bo)
* TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
* to use struct iosys_map.
*/
- ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
+ ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
if (ret)
return ret;
@@ -2992,6 +2995,14 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
+ /*
+ * Compression implies coh_none, therefore we know for sure that WB
+ * memory can't currently use compression, which is likely one of the
+ * common cases.
+ */
+ if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
+ return false;
+
return true;
}
@@ -3067,7 +3078,7 @@ void xe_bo_put(struct xe_bo *bo)
#endif
for_each_tile(tile, xe_bo_device(bo), id)
if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
- might_lock(&bo->ggtt_node[id]->ggtt->lock);
+ xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
drm_gem_object_put(&bo->ttm.base);
}
}
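A unifying theme in this file: struct xe_ttm_tt no longer caches its owning struct xe_device; every helper now derives it from the TTM device the object hangs off. The core idiom, as a sketch (tt_to_xe() is a hypothetical name for illustration):

static inline struct xe_device *tt_to_xe(struct ttm_buffer_object *ttm_bo)
{
	/* recover the xe device from the TTM device, no back-pointer needed */
	return ttm_to_xe_device(ttm_bo->bdev);
}

Dropping the back-pointer shrinks every xe_ttm_tt and removes one way for a stale device pointer to outlive its object.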
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 02ada1fb8a23..02e8cde4c6b2 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -118,9 +118,6 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags,
u64 alignment);
-struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
- const void *data, size_t size,
- enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
@@ -238,6 +235,19 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
return xe_bo_addr(bo, 0, page_size);
}
+/**
+ * xe_bo_size() - Xe BO size
+ * @bo: The bo object.
+ *
+ * Simple helper to return Xe BO's size.
+ *
+ * Return: Xe BO's size
+ */
+static inline size_t xe_bo_size(struct xe_bo *bo)
+{
+ return bo->ttm.base.size;
+}
+
static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
@@ -246,7 +256,7 @@ __xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo));
XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
return ggtt_node->base.start;
}
@@ -300,7 +310,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo);
static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
- return PAGE_ALIGN(bo->ttm.base.size);
+ return PAGE_ALIGN(xe_bo_size(bo));
}
static inline bool xe_bo_has_pages(struct xe_bo *bo)
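Given xe_bo_ccs_pages_start() above, CCS metadata sits immediately after the page-aligned main payload. A sketch of the implied sizing, reusing xe_device_ccs_bytes() as the xe_bo.c hunks do (bo_backing_size() is a hypothetical helper):

static size_t bo_backing_size(struct xe_device *xe, struct xe_bo *bo)
{
	/* main payload, page aligned == xe_bo_ccs_pages_start() */
	size_t main = PAGE_ALIGN(xe_bo_size(bo));
	/* trailing CCS pages, if this BO carries flat-CCS metadata */
	size_t ccs = xe_bo_needs_ccs_pages(bo) ?
		     PAGE_ALIGN(xe_device_ccs_bytes(xe, xe_bo_size(bo))) : 0;

	return main + ccs;
}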
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index ed3746d32b27..7484ce55a303 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -197,9 +197,7 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
continue;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo_unlocked(tile->mem.ggtt, bo);
}
}
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index eb5e83c5f233..ff560d82496f 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -32,8 +32,6 @@ struct xe_bo {
struct xe_bo *backup_obj;
/** @parent_obj: Ref to parent bo if this a backup_obj */
struct xe_bo *parent_obj;
- /** @size: Size of this buffer object */
- size_t size;
/** @flags: flags for this buffer object */
u32 flags;
/** @vm: VM this BO is attached to, for extobj this will be NULL */
@@ -86,7 +84,7 @@ struct xe_bo {
u16 cpu_caching;
/** @devmem_allocation: SVM device memory allocation */
- struct drm_gpusvm_devmem devmem_allocation;
+ struct drm_pagemap_devmem devmem_allocation;
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index cb9f175c89a1..8ec1ff1e4e80 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -3,14 +3,19 @@
* Copyright © 2025 Intel Corporation
*/
+#include <linux/bitops.h>
#include <linux/configfs.h>
+#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/string.h>
#include "xe_configfs.h"
#include "xe_module.h"
+#include "xe_hw_engine_types.h"
+
/**
* DOC: Xe Configfs
*
@@ -48,6 +53,30 @@
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
* # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
*
+ * Allowed engines:
+ * ----------------
+ *
+ * Allow only a set of engines to be available, disabling the other engines
+ * even if they are available in hardware. This is applied after HW fuses are
+ * considered on each tile. Examples:
+ *
+ * Allow only one render and one copy engine, nothing else::
+ *
+ * # echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Allow only compute engines and first copy engine::
+ *
+ * # echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Note that the engine names are the per-GT hardware names. On multi-tile
+ * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
+ * and copy engines on each tile.
+ *
+ * The requested configuration may not be supported by the platform, in which
+ * case the driver may fail to probe: for example, when at least one copy
+ * engine is expected to be available for migrations but all copy engines are
+ * disabled. This is intended for debugging purposes only.
+ *
* Remove devices
* ==============
*
@@ -60,11 +89,30 @@ struct xe_config_device {
struct config_group group;
bool survivability_mode;
+ u64 engines_allowed;
/* protects attributes */
struct mutex lock;
};
+struct engine_info {
+ const char *cls;
+ u64 mask;
+};
+
+/* Helper macros to size the buffer allocation when parsing engine names */
+#define MAX_ENGINE_CLASS_CHARS 5
+#define MAX_ENGINE_INSTANCE_CHARS 2
+
+static const struct engine_info engine_info[] = {
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+};
+
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
return container_of(to_config_group(item), struct xe_config_device, group);
@@ -94,10 +142,96 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
return len;
}
+static ssize_t engines_allowed_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ char *p = page;
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u64 mask = engine_info[i].mask;
+
+ if ((dev->engines_allowed & mask) == mask) {
+ p += sprintf(p, "%s*\n", engine_info[i].cls);
+ } else if (mask & dev->engines_allowed) {
+ u16 bit0 = __ffs64(mask), bit;
+
+ mask &= dev->engines_allowed;
+
+ for_each_set_bit(bit, (const unsigned long *)&mask, 64)
+ p += sprintf(p, "%s%u\n", engine_info[i].cls,
+ bit - bit0);
+ }
+ }
+
+ return p - page;
+}
+
+static bool lookup_engine_mask(const char *pattern, u64 *mask)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u8 instance;
+ u16 bit;
+
+ if (!str_has_prefix(pattern, engine_info[i].cls))
+ continue;
+
+ pattern += strlen(engine_info[i].cls);
+
+ if (!strcmp(pattern, "*")) {
+ *mask = engine_info[i].mask;
+ return true;
+ }
+
+ if (kstrtou8(pattern, 10, &instance))
+ return false;
+
+ bit = __ffs64(engine_info[i].mask) + instance;
+ if (bit >= fls64(engine_info[i].mask))
+ return false;
+
+ *mask = BIT_ULL(bit);
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t engines_allowed_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ size_t patternlen, p;
+ u64 mask, val = 0;
+
+ for (p = 0; p < len; p += patternlen + 1) {
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+
+ patternlen = strcspn(page + p, ",\n");
+ if (patternlen >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, page + p, patternlen);
+ buf[patternlen] = '\0';
+
+ if (!lookup_engine_mask(buf, &mask))
+ return -EINVAL;
+
+ val |= mask;
+ }
+
+ mutex_lock(&dev->lock);
+ dev->engines_allowed = val;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
CONFIGFS_ATTR(, survivability_mode);
+CONFIGFS_ATTR(, engines_allowed);
static struct configfs_attribute *xe_config_device_attrs[] = {
&attr_survivability_mode,
+ &attr_engines_allowed,
NULL,
};
@@ -139,6 +273,9 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
if (!dev)
return ERR_PTR(-ENOMEM);
+ /* Default values */
+ dev->engines_allowed = U64_MAX;
+
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
mutex_init(&dev->lock);
@@ -226,6 +363,29 @@ void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
config_item_put(&dev->group.cg_item);
}
+/**
+ * xe_configfs_get_engines_allowed - get mask of allowed engines from configfs
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the mask of engines allowed to be used.
+ *
+ * Return: engine mask with allowed engines
+ */
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ u64 engines_allowed;
+
+ if (!dev)
+ return U64_MAX;
+
+ engines_allowed = dev->engines_allowed;
+ config_item_put(&dev->group.cg_item);
+
+ return engines_allowed;
+}
+
int __init xe_configfs_init(void)
{
struct config_group *root = &xe_configfs.su_group;
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index d7d041ec2611..fb8764008089 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -5,6 +5,7 @@
#ifndef _XE_CONFIGFS_H_
#define _XE_CONFIGFS_H_
+#include <linux/limits.h>
#include <linux/types.h>
struct pci_dev;
@@ -14,11 +15,13 @@ int xe_configfs_init(void);
void xe_configfs_exit(void);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
#else
-static inline int xe_configfs_init(void) { return 0; };
-static inline void xe_configfs_exit(void) {};
-static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; };
-static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {};
+static inline int xe_configfs_init(void) { return 0; }
+static inline void xe_configfs_exit(void) { }
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
#endif
#endif
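On the consumer side, probe code can intersect the fused engine mask with this allow-list; U64_MAX (the default, also returned when no configfs group exists) is a no-op. A hedged sketch (the gt->info.engine_mask field name is an assumption here):

static void apply_engines_allowed(struct xe_gt *gt, struct pci_dev *pdev)
{
	u64 allowed = xe_configfs_get_engines_allowed(pdev);

	/* bits use the same per-GT engine numbering as the masks above */
	gt->info.engine_mask &= allowed;	/* field name is an assumption */
}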
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index d0503959a8ed..26e9d146ccbf 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -20,7 +20,9 @@
#include "xe_pm.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_step.h"
+#include "xe_wa.h"
#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
@@ -82,9 +84,28 @@ static int sriov_info(struct seq_file *m, void *data)
return 0;
}
+static int workarounds(struct xe_device *xe, struct drm_printer *p)
+{
+ xe_pm_runtime_get(xe);
+ xe_wa_device_dump(xe, p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static int workaround_info(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = node_to_xe(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ workarounds(xe, &p);
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
{ .name = "sriov_info", .show = sriov_info, },
+ { .name = "workarounds", .show = workaround_info, },
};
static int forcewake_open(struct inode *inode, struct file *file)
@@ -191,6 +212,41 @@ static const struct file_operations wedged_mode_fops = {
.write = wedged_mode_set,
};
+static ssize_t atomic_svm_timeslice_ms_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ char buf[32];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", xe->atomic_svm_timeslice_ms);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t atomic_svm_timeslice_ms_set(struct file *f,
+ const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ u32 atomic_svm_timeslice_ms;
+ ssize_t ret;
+
+ ret = kstrtouint_from_user(ubuf, size, 0, &atomic_svm_timeslice_ms);
+ if (ret)
+ return ret;
+
+ xe->atomic_svm_timeslice_ms = atomic_svm_timeslice_ms;
+
+ return size;
+}
+
+static const struct file_operations atomic_svm_timeslice_ms_fops = {
+ .owner = THIS_MODULE,
+ .read = atomic_svm_timeslice_ms_show,
+ .write = atomic_svm_timeslice_ms_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
@@ -211,6 +267,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("wedged_mode", 0600, root, xe,
&wedged_mode_fops);
+ debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
+ &atomic_svm_timeslice_ms_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
@@ -235,4 +294,7 @@ void xe_debugfs_register(struct xe_device *xe)
xe_pxp_debugfs_register(xe->pxp);
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
+
+ if (IS_SRIOV_PF(xe))
+ xe_sriov_pf_debugfs_register(xe, root);
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 11e60d687572..203e3038cc81 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -331,13 +331,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
struct xe_guc *guc = exec_queue_to_guc(q);
- u32 adj_logical_mask = q->logical_mask;
- u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
-
unsigned int fw_ref;
bool cookie;
- int i;
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
@@ -353,14 +349,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
cookie = dma_fence_begin_signalling();
- for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
- if (adj_logical_mask & BIT(i)) {
- adj_logical_mask |= width_mask << i;
- i += q->width;
- } else {
- ++i;
- }
- }
/* keep going if fw fails as we still want to save the memory and SW data */
fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index e9f3c1a53db2..6dc84e4ed281 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -43,10 +43,11 @@
#include "xe_guc_pc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
+#include "xe_i2c.h"
#include "xe_irq.h"
-#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
@@ -67,6 +68,7 @@
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
@@ -403,9 +405,6 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
- if (!IS_ERR_OR_NULL(xe->mem.shrinker))
- xe_shrinker_destroy(xe->mem.shrinker);
-
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -439,13 +438,14 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- xe->mem.shrinker = xe_shrinker_create(xe);
- if (IS_ERR(xe->mem.shrinker))
- return ERR_CAST(xe->mem.shrinker);
+ err = xe_shrinker_create(xe);
+ if (err)
+ goto err;
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
+ xe->atomic_svm_timeslice_ms = 5;
err = xe_irq_init(xe);
if (err)
@@ -494,10 +494,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- err = xe_display_create(xe);
- if (WARN_ON(err))
- goto err;
-
return xe;
err:
@@ -705,6 +701,9 @@ int xe_device_probe_early(struct xe_device *xe)
{
int err;
+ xe_wa_device_init(xe);
+ xe_wa_process_device_oob(xe);
+
err = xe_mmio_probe_early(xe);
if (err)
return err;
@@ -790,45 +789,16 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- err = xe_ttm_sys_mgr_init(xe);
- if (err)
- return err;
-
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
if (err)
return err;
-
- /*
- * Only after this point can GT-specific MMIO operations
- * (including things like communication with the GuC)
- * be performed.
- */
- xe_gt_mmio_init(gt);
}
for_each_tile(tile, xe, id) {
- if (IS_SRIOV_VF(xe)) {
- xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
- err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
- if (err)
- return err;
- err = xe_gt_sriov_vf_query_config(tile->primary_gt);
- if (err)
- return err;
- }
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
- err = xe_memirq_init(&tile->memirq);
- if (err)
- return err;
- }
-
- for_each_gt(gt, xe, id) {
- err = xe_gt_init_hwconfig(gt);
- if (err)
- return err;
}
err = xe_devcoredump_init(xe);
@@ -856,6 +826,14 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ /*
+ * Allow allocations only now, to ensure that xe_display_init_early()
+ * is always the first to allocate.
+ */
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
+
/* Allocate and map stolen after potential VRAM resize */
err = xe_ttm_stolen_mgr_init(xe);
if (err)
@@ -887,6 +865,12 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ if (xe->tiles->media_gt &&
+ XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_DEVICE_WA_DISABLE(xe, 15015404425);
+
+ xe_nvm_init(xe);
+
err = xe_heci_gsc_init(xe);
if (err)
return err;
@@ -927,6 +911,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_unregister_display;
+ err = xe_i2c_probe(xe);
+ if (err)
+ goto err_unregister_display;
+
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
@@ -944,6 +932,8 @@ void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
+ xe_nvm_fini(xe);
+
drm_dev_unplug(&xe->drm);
xe_bo_pci_dev_remove_all(xe);
@@ -1184,7 +1174,8 @@ void xe_device_declare_wedged(struct xe_device *xe)
/* Notify userspace of wedged device */
drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 0bc3bc8e6803..bc802e066a7d 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -60,35 +60,32 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
return &xe->tiles[0];
}
+/*
+ * Highest number of GTs per tile on any platform. Used only for memory
+ * allocation sizing. Any logic looping over GTs or mapping userspace GT IDs
+ * into GT structures should use the per-platform xe->info.max_gt_per_tile
+ * instead.
+ */
#define XE_MAX_GT_PER_TILE 2
-static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
-{
- if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
- gt_id = 0;
-
- return gt_id ? tile->media_gt : tile->primary_gt;
-}
-
static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
- struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ struct xe_tile *tile;
struct xe_gt *gt;
- /*
- * FIXME: This only works for now because multi-tile and standalone
- * media are mutually exclusive on the platforms we have today.
- *
- * id => GT mapping may change once we settle on how we want to handle
- * our UAPI.
- */
- if (MEDIA_VER(xe) >= 13) {
- gt = xe_tile_get_gt(root_tile, gt_id);
- } else {
- if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
- gt_id = 0;
-
- gt = xe->tiles[gt_id].primary_gt;
+ if (gt_id >= xe->info.tile_count * xe->info.max_gt_per_tile)
+ return NULL;
+
+ tile = &xe->tiles[gt_id / xe->info.max_gt_per_tile];
+ switch (gt_id % xe->info.max_gt_per_tile) {
+ default:
+ xe_assert(xe, false);
+ fallthrough;
+ case 0:
+ gt = tile->primary_gt;
+ break;
+ case 1:
+ gt = tile->media_gt;
+ break;
}
if (!gt)
@@ -130,14 +127,14 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe)
for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \
for_each_if((tile__) = &(xe__)->tiles[(id__)])
-/*
- * FIXME: This only works for now since multi-tile and standalone media
- * happen to be mutually exclusive. Future platforms may change this...
- */
#define for_each_gt(gt__, xe__, id__) \
- for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \
+ for ((id__) = 0; (id__) < (xe__)->info.tile_count * (xe__)->info.max_gt_per_tile; (id__)++) \
for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))
+#define for_each_gt_on_tile(gt__, tile__, id__) \
+ for_each_gt((gt__), (tile__)->xe, (id__)) \
+ for_each_if((gt__)->tile == (tile__))
+
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
return &gt->pm.fw;
@@ -195,6 +192,8 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+int xe_is_injection_active(void);
+
/*
* Occasionally it is seen that the G2H worker starts running after a delay of more than
* a second even after being queued and activated by the Linux workqueue subsystem. This
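With the rework above, GT IDs form a dense tile_count * max_gt_per_tile space and for_each_gt() simply skips the holes via the NULL return of xe_device_get_gt(). The inverse mapping is plain arithmetic (xe_gt_id() here is a sketch, not a patch helper):

static inline u8 xe_gt_id(const struct xe_device *xe, u8 tile_id, u8 slot)
{
	/* slot 0 is the tile's primary GT, slot 1 its media GT */
	return tile_id * xe->info.max_gt_per_tile + slot;
}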
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index b9440f8c781e..e5fd0cd537bc 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -24,6 +24,12 @@
*
* vram_d3cold_threshold - Report/change vram used threshold (in MB) below
* which vram save/restore is permissible during runtime D3cold entry/exit.
+ *
+ * lb_fan_control_version - Fan control version provisioned by late binding.
+ * Exposed only if supported by the device.
+ *
+ * lb_voltage_regulator_version - Voltage regulator version provisioned by late
+ * binding. Exposed only if supported by the device.
*/
static ssize_t
@@ -65,6 +71,135 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+static ssize_t
+lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
+ u16 major = 0, minor = 0, hotfix = 0, build = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
+ &ver_low, NULL);
+ if (ret)
+ goto out;
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
+ &ver_high, NULL);
+ if (ret)
+ goto out;
+
+ major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
+ minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
+ hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
+ build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
+ }
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+}
+static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);
+
+static ssize_t
+lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
+ u16 major = 0, minor = 0, hotfix = 0, build = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
+ &ver_low, NULL);
+ if (ret)
+ goto out;
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
+ &ver_high, NULL);
+ if (ret)
+ goto out;
+
+ major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
+ minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
+ hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
+ build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
+ }
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
+}
+static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
+
+static int late_bind_create_files(struct device *dev)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+ if (ret)
+ goto out;
+ }
+
+ if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
+out:
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static void late_bind_remove_files(struct device *dev)
+{
+ struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ struct xe_tile *root = xe_device_get_root_tile(xe);
+ u32 cap;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+
+ ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
+ &cap, NULL);
+ if (ret)
+ goto out;
+
+ if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
+ sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+
+ if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
+out:
+ xe_pm_runtime_put(xe);
+}
+
/**
* DOC: PCIe Gen5 Limitations
*
@@ -151,8 +286,10 @@ static void xe_device_sysfs_fini(void *arg)
if (xe->d3cold.capable)
sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (xe->info.platform == XE_BATTLEMAGE)
+ if (xe->info.platform == XE_BATTLEMAGE) {
sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
+ late_bind_remove_files(xe->drm.dev);
+ }
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -170,6 +307,10 @@ int xe_device_sysfs_init(struct xe_device *xe)
ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
if (ret)
return ret;
+
+ ret = late_bind_create_files(dev);
+ if (ret)
+ return ret;
}
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
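Both attributes above decode the same pair of pcode version words. The shared decode step, as a sketch (decode_lb_version() is hypothetical; the mask macros come from the pcode API used in the hunks above):

static void decode_lb_version(u32 ver_low, u32 ver_high,
			      char *buf, size_t len)
{
	u16 major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
	u16 minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
	u16 hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
	u16 build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);

	snprintf(buf, len, "%u.%u.%u.%u", major, minor, hotfix, build);
}

Factoring this out would also collapse the two near-identical show() handlers, at the cost of threading the per-feature PROVISIONED check through.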
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 6383a1c0d478..d4d2c6854790 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -21,7 +21,9 @@
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
+#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
+#include "xe_sriov_vf_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_ttm_vram_mgr_types.h"
@@ -30,12 +32,11 @@
#define TEST_VM_OPS_ERROR
#endif
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "intel_display_core.h"
-#include "intel_display_device.h"
-#endif
-
+struct dram_info;
+struct intel_display;
+struct intel_dg_nvm_dev;
struct xe_ggtt;
+struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
@@ -108,7 +109,7 @@ struct xe_vram_region {
void __iomem *mapping;
/** @ttm: VRAM TTM manager */
struct xe_ttm_vram_mgr ttm;
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
struct dev_pagemap pagemap;
/**
@@ -296,6 +297,8 @@ struct xe_device {
u8 vram_flags;
/** @info.tile_count: Number of tiles */
u8 tile_count;
+ /** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
+ u8 max_gt_per_tile;
/** @info.gt_count: Total number of GTs for entire device */
u8 gt_count;
/** @info.vm_max_level: Max VM level */
@@ -319,6 +322,8 @@ struct xe_device {
u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_gsc_nvm: Device has gsc non-volatile memory */
+ u8 has_gsc_nvm:1;
/** @info.has_heci_cscfi: device has heci cscfi */
u8 has_heci_cscfi:1;
/** @info.has_heci_gscfi: device has heci gscfi */
@@ -360,6 +365,19 @@ struct xe_device {
u8 skip_pcode:1;
} info;
+ /** @wa_active: keep track of active workarounds */
+ struct {
+ /** @wa_active.oob: bitmap with active OOB workarounds */
+ unsigned long *oob;
+
+ /**
+	 * @wa_active.oob_initialized: Mark oob as initialized to help detect misuse
+	 * of XE_DEVICE_WA() - it can only be called during initialization, after
+	 * the Device OOB WAs have been processed.
+ */
+ bool oob_initialized;
+ } wa_active;
+
/** @survivability: survivability information for device */
struct xe_survivability survivability;
@@ -406,10 +424,12 @@ struct xe_device {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
- /** @sriov.pf: PF specific data */
- struct xe_device_pf pf;
- /** @sriov.vf: VF specific data */
- struct xe_device_vf vf;
+ union {
+ /** @sriov.pf: PF specific data */
+ struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
+ };
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
@@ -502,6 +522,10 @@ struct xe_device {
const struct xe_pat_table_entry *table;
/** @pat.n_entries: Number of PAT entries */
int n_entries;
+	/** @pat.pat_ats: PAT entry for PCIe ATS responses */
+	const struct xe_pat_table_entry *pat_ats;
+	/** @pat.pat_pta: PAT entry for page table accesses */
+	const struct xe_pat_table_entry *pat_pta;
u32 idx[__XE_CACHE_LEVEL_COUNT];
} pat;
@@ -548,6 +572,9 @@ struct xe_device {
/** @heci_gsc: graphics security controller */
struct xe_heci_gsc heci_gsc;
+ /** @nvm: discrete graphics non-volatile memory */
+ struct intel_dg_nvm_dev *nvm;
+
/** @oa: oa observation subsystem */
struct xe_oa oa;
@@ -576,6 +603,12 @@ struct xe_device {
/** @pmu: performance monitoring unit */
struct xe_pmu pmu;
+ /** @i2c: I2C host controller */
+ struct xe_i2c *i2c;
+
+ /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */
+ u32 atomic_svm_timeslice_ms;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
@@ -584,6 +617,14 @@ struct xe_device {
u8 vm_inject_error_position;
#endif
+#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
+ /**
+ * @global_total_pages: global GPU page usage tracked for gpu_mem
+ * tracepoints
+ */
+ atomic64_t global_total_pages;
+#endif
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -593,27 +634,9 @@ struct xe_device {
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
- struct intel_display display;
-
- struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
- enum intel_dram_type {
- INTEL_DRAM_UNKNOWN,
- INTEL_DRAM_DDR3,
- INTEL_DRAM_DDR4,
- INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4,
- INTEL_DRAM_DDR5,
- INTEL_DRAM_LPDDR5,
- INTEL_DRAM_GDDR,
- INTEL_DRAM_GDDR_ECC,
- __INTEL_DRAM_TYPE_MAX,
- } type;
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- } dram_info;
+ struct intel_display *display;
+
+ const struct dram_info *dram_info;
/*
* edram size in MB.
diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
new file mode 100644
index 000000000000..3a0c4ccc4224
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
@@ -0,0 +1,2 @@
+15015404425 PLATFORM(LUNARLAKE)
+ PLATFORM(PANTHERLAKE)
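As a hedged sketch of what xe_gen_wa_oob is expected to emit for this rules file (the exact prefix is derived from the generated header's file name, assumed here to be xe_device_wa_oob.h):

/* Illustrative generated output only -- not part of this patch. */
#ifndef _GENERATED_XE_DEVICE_WA_OOB_
#define _GENERATED_XE_DEVICE_WA_OOB_

enum {
	XE_DEVICE_WA_OOB_15015404425 = 0,
	_XE_DEVICE_WA_OOB_COUNT = 1,
};

#endif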
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 31f688e953d7..f931ff9b1ec0 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -167,7 +167,7 @@ void xe_drm_client_remove_bo(struct xe_bo *bo)
static void bo_meminfo(struct xe_bo *bo,
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
- u64 sz = bo->size;
+ u64 sz = xe_bo_size(bo);
u32 mem_type = bo->ttm.resource->mem_type;
xe_bo_assert_held(bo);
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 96732613b4b7..af7916315ac6 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -258,11 +258,13 @@ static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value,
static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value,
struct eu_stall_open_properties *props)
{
- if (value >= xe->info.gt_count) {
+ struct xe_gt *gt = xe_device_get_gt(xe, value);
+
+ if (!gt) {
drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", value);
return -EINVAL;
}
- props->gt = xe_device_get_gt(xe, value);
+ props->gt = gt;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index fee22358cc09..8991b4aed440 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -610,7 +610,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
- if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
+ if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
return -EINVAL;
if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 8a5cba22b586..c59a9b330697 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -64,7 +64,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- if (!xe_gt_is_media_type(gt))
+ if (xe_gt_is_main_type(gt))
init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
FORCEWAKE_ACK_RENDER);
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index ed9183599e31..6581cb0f0e59 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -18,8 +18,8 @@
" *\n" \
" * This file was generated from rules: %s\n" \
" */\n" \
- "#ifndef _GENERATED_XE_WA_OOB_\n" \
- "#define _GENERATED_XE_WA_OOB_\n" \
+ "#ifndef _GENERATED_%s_\n" \
+ "#define _GENERATED_%s_\n" \
"\n" \
"enum {\n"
@@ -52,7 +52,7 @@ static char *strip(char *line, size_t linelen)
}
#define MAX_LINE_LEN 4096
-static int parse(FILE *input, FILE *csource, FILE *cheader)
+static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix)
{
char line[MAX_LINE_LEN + 1];
char *name, *prev_name = NULL, *rules;
@@ -96,7 +96,7 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
}
if (name) {
- fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
+ fprintf(cheader, "\t%s_%s = %u,\n", prefix, name, idx);
/* Close previous entry before starting a new one */
if (idx)
@@ -118,7 +118,33 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
if (idx)
fprintf(csource, ") },\n");
- fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
+ fprintf(cheader, "\t_%s_COUNT = %u\n", prefix, idx);
+
+ return 0;
+}
+
+static int fn_to_prefix(const char *fn, char *prefix, size_t size)
+{
+ size_t len;
+
+ fn = basename(fn);
+ len = strlen(fn);
+
+ if (len > size - 1)
+ return -ENAMETOOLONG;
+
+ memcpy(prefix, fn, len + 1);
+
+ for (char *p = prefix; *p; p++) {
+ switch (*p) {
+ case '.':
+ *p = '\0';
+ return 0;
+ default:
+ *p = toupper(*p);
+ break;
+ }
+ }
return 0;
}
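To illustrate fn_to_prefix() above: the prefix is the file's basename, uppercased up to the first dot. The calls below are hypothetical, mirroring how main() passes the header file name:

/* Sketch of the transformation performed by fn_to_prefix(). */
char prefix[128];

fn_to_prefix("generated/xe_wa_oob.h", prefix, sizeof(prefix));
/* prefix is now "XE_WA_OOB" */

fn_to_prefix("generated/xe_device_wa_oob.h", prefix, sizeof(prefix));
/* prefix is now "XE_DEVICE_WA_OOB" */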
@@ -141,6 +167,7 @@ int main(int argc, const char *argv[])
[ARGS_CHEADER] = { .fn = argv[3], .mode = "w" },
};
int ret = 1;
+ char prefix[128];
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
@@ -148,6 +175,9 @@ int main(int argc, const char *argv[])
return 1;
}
+ if (fn_to_prefix(args[ARGS_CHEADER].fn, prefix, sizeof(prefix)) < 0)
+ return 1;
+
for (int i = 0; i < _ARGS_COUNT; i++) {
args[i].f = fopen(args[i].fn, args[i].mode);
if (!args[i].f) {
@@ -157,9 +187,10 @@ int main(int argc, const char *argv[])
}
}
- fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn);
+ fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn, prefix, prefix);
+
ret = parse(args[ARGS_INPUT].f, args[ARGS_CSOURCE].f,
- args[ARGS_CHEADER].f);
+ args[ARGS_CHEADER].f, prefix);
if (!ret)
fprintf(args[ARGS_CHEADER].f, FOOTER);
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 2c799958c1e4..29d4d3f51da1 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,6 +5,7 @@
#include "xe_ggtt.h"
+#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
@@ -22,12 +23,13 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -64,13 +66,9 @@
* give us the correct placement for free.
*/
-static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
- u64 pte;
-
- pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
- pte |= XE_PAGE_PRESENT;
+ u64 pte = XE_PAGE_PRESENT;
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
pte |= XE_GGTT_PTE_DM;
@@ -78,13 +76,12 @@ static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
return pte;
}
-static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
- pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
+ pte = xelp_ggtt_pte_flags(bo, pat_index);
xe_assert(xe, pat_index <= 3);
@@ -149,8 +146,9 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
xe_tile_assert(ggtt->tile, start < end);
if (ggtt->scratch)
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
- pat_index);
+ scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
+ ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
+ pat_index);
else
scratch_pte = 0;
@@ -160,6 +158,22 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+/**
+ * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
+ * @tile: &xe_tile
+ *
+ * Allocates a &xe_ggtt for a given tile.
+ *
+ * Return: &xe_ggtt on success, or NULL when out of memory.
+ */
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (ggtt)
+ ggtt->tile = tile;
+ return ggtt;
+}
+
static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -176,6 +190,13 @@ static void ggtt_fini(void *arg)
ggtt->scratch = NULL;
}
+#ifdef CONFIG_LOCKDEP
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{
+ might_lock(&ggtt->lock);
+}
+#endif
+
static void primelockdep(struct xe_ggtt *ggtt)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -187,20 +208,36 @@ static void primelockdep(struct xe_ggtt *ggtt)
}
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
- .pte_encode_bo = xelp_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
+static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
+{
+ drm_mm_init(&ggtt->mm, reserved,
+ ggtt->size - reserved);
+ mutex_init(&ggtt->lock);
+ primelockdep(ggtt);
+}
+
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
+{
+ ggtt->size = size;
+ __xe_ggtt_init_early(ggtt, reserved);
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);
+
static void dev_fini_ggtt(void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -226,7 +263,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
unsigned int gsm_size;
int err;
- if (IS_SRIOV_VF(xe))
+ if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
else
gsm_size = probe_gsm_size(pdev);
@@ -254,11 +291,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
-
- drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
- ggtt->size - xe_wopcm_size(xe));
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
+ __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
if (err)
@@ -269,7 +302,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
return err;
if (IS_SRIOV_VF(xe)) {
- err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+ err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
if (err)
return err;
}
@@ -388,7 +421,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
goto err;
}
- xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);
+ xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch));
xe_ggtt_initial_clear(ggtt);
@@ -440,16 +473,17 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
}
/**
- * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
+ * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
* @node: the &xe_ggtt_node to hold reserved GGTT node
* @start: the starting GGTT address of the reserved region
* @end: the end GGTT address of the reserved region
*
- * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
+ * To be used in cases where ggtt->lock is already taken.
+ * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
struct xe_ggtt *ggtt = node->ggtt;
int err;
@@ -458,14 +492,13 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
+ lockdep_assert_held(&ggtt->lock);
node->base.color = 0;
node->base.start = start;
node->base.size = end - start;
- mutex_lock(&ggtt->lock);
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- mutex_unlock(&ggtt->lock);
if (xe_gt_WARN(ggtt->tile->primary_gt, err,
"Failed to balloon GGTT %#llx-%#llx (%pe)\n",
@@ -477,27 +510,72 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
}
/**
- * xe_ggtt_node_remove_balloon - release a reserved GGTT region
+ * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
* @node: the &xe_ggtt_node with reserved GGTT region
*
- * See xe_ggtt_node_insert_balloon() for details.
+ * To be used in cases where ggtt->lock is already taken.
+ * See xe_ggtt_node_insert_balloon_locked() for details.
*/
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
- if (!node || !node->ggtt)
+ if (!xe_ggtt_node_allocated(node))
return;
- if (!drm_mm_node_allocated(&node->base))
- goto free_node;
+ lockdep_assert_held(&node->ggtt->lock);
xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
- mutex_lock(&node->ggtt->lock);
drm_mm_remove_node(&node->base);
- mutex_unlock(&node->ggtt->lock);
+}
-free_node:
- xe_ggtt_node_fini(node);
+static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
+{
+ struct xe_tile *tile = ggtt->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 __maybe_unused wopcm = xe_wopcm_size(xe);
+
+ xe_tile_assert(tile, start >= wopcm);
+ xe_tile_assert(tile, start + size < ggtt->size - wopcm);
+}
+
+/**
+ * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
+ * @ggtt: the &xe_ggtt struct instance
+ * @shift: change to the location of area provisioned for current VF
+ *
+ * This function moves all nodes from the GGTT VM to a temporary list. These nodes are
+ * expected to represent allocations in the range formerly assigned to the current VF,
+ * before the range changed. Once the GGTT VM is completely clear of nodes, they are
+ * re-added with shifted offsets.
+ *
+ * This function cannot fail, since it only shifts existing nodes without any additional
+ * processing. If the nodes fit at the old addresses, they will fit at the new ones. A
+ * failure inside this function would indicate that the list of nodes was either already
+ * damaged, or that the shift moves the address range outside of valid bounds. Both cases
+ * justify an assert rather than an error code.
+ */
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
+{
+ struct xe_tile *tile __maybe_unused = ggtt->tile;
+ struct drm_mm_node *node, *tmpn;
+ LIST_HEAD(temp_list_head);
+
+ lockdep_assert_held(&ggtt->lock);
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
+ xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);
+
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
+ drm_mm_remove_node(node);
+ list_add(&node->node_list, &temp_list_head);
+ }
+
+ list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
+ list_del(&node->node_list);
+ node->start += shift;
+ drm_mm_reserve_node(&ggtt->mm, node);
+ xe_tile_assert(tile, drm_mm_node_allocated(node));
+ }
}
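A rough sketch of the intended caller pattern for the new locked helpers; the function name and bounds below are illustrative, the real VF fixup code is not part of this hunk:

/* Hypothetical VF GGTT range fixup built on the *_locked primitives. */
static void example_vf_ggtt_fixup(struct xe_ggtt *ggtt,
				  struct xe_ggtt_node *pre_balloon,
				  struct xe_ggtt_node *post_balloon,
				  u64 ggtt_base, u64 new_start, u64 new_end, s64 shift)
{
	mutex_lock(&ggtt->lock);

	xe_ggtt_node_remove_balloon_locked(pre_balloon);
	xe_ggtt_node_remove_balloon_locked(post_balloon);

	xe_ggtt_shift_nodes_locked(ggtt, shift);

	/* Error handling elided for brevity; both inserts can fail. */
	xe_ggtt_node_insert_balloon_locked(pre_balloon, ggtt_base, new_start);
	xe_ggtt_node_insert_balloon_locked(post_balloon, new_end, ggtt->size);

	mutex_unlock(&ggtt->lock);
}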
/**
@@ -548,12 +626,12 @@ int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
* xe_ggtt_node_init - Initialize %xe_ggtt_node struct
* @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
*
- * This function will allocated the struct %xe_ggtt_node and return it's pointer.
+ * This function will allocate the struct %xe_ggtt_node and return its pointer.
* This struct will then be freed after the node removal upon xe_ggtt_node_remove()
- * or xe_ggtt_node_remove_balloon().
+ * or xe_ggtt_node_remove_balloon_locked().
* Having %xe_ggtt_node struct allocated doesn't mean that the node is already allocated
* in GGTT. Only the xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in GGTT.
+ * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or reserved in GGTT.
*
* Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
**/
@@ -575,7 +653,7 @@ struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
* @node: the &xe_ggtt_node to be freed
*
* If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * or xe_ggtt_node_insert_balloon(); and this @node is not going to be reused, then,
+ * or xe_ggtt_node_insert_balloon_locked(); and this @node is not going to be reused, then,
* this function needs to be called to free the %xe_ggtt_node struct
**/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
@@ -600,26 +678,59 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
/**
* xe_ggtt_map_bo - Map the BO into GGTT
* @ggtt: the &xe_ggtt where node will be mapped
+ * @node: the &xe_ggtt_node where this BO is mapped
* @bo: the &xe_bo to be mapped
+ * @pat_index: Which pat_index to use.
*/
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index)
{
- u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
- u64 start;
- u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
+ u64 start, pte, end;
+ struct xe_res_cursor cur;
+
+ if (XE_WARN_ON(!node))
return;
- start = bo->ggtt_node[ggtt->tile->id]->base.start;
+ start = node->base.start;
+ end = start + xe_bo_size(bo);
+
+ pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+ if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
+ xe_assert(xe_bo_device(bo), bo->ttm.ttm);
+
+ for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte | xe_res_dma(&cur));
+ } else {
+ /* Prepend GPU offset */
+ pte |= vram_region_gpu_offset(bo->ttm.resource);
- for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
- pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
- ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
+ for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte + cur.start);
}
}
+/**
+ * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
+ * @ggtt: the &xe_ggtt where node will be mapped
+ * @bo: the &xe_bo to be mapped
+ *
+ * This is used to restore a GGTT mapping after suspend.
+ */
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ mutex_lock(&ggtt->lock);
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
+ mutex_unlock(&ggtt->lock);
+}
+
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
@@ -632,7 +743,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
return 0;
}
@@ -651,12 +762,15 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
- bo->size, alignment, 0, start, end, 0);
+ xe_bo_size(bo), alignment, 0, start, end, 0);
if (err) {
xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
bo->ggtt_node[tile_id] = NULL;
} else {
- xe_ggtt_map_bo(ggtt, bo);
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
}
mutex_unlock(&ggtt->lock);
@@ -709,7 +823,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
@@ -852,3 +966,30 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
return total;
}
+
+/**
+ * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
+ * @ggtt: &xe_ggtt
+ * @bo: &xe_bo
+ * @pat_index: The pat_index for the PTE.
+ *
+ * This function returns the pte_flags for a given BO, without address.
+ * It's used by the DPT code to fill a GGTT-mapped BO with a linear lookup table.
+ */
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
+ struct xe_bo *bo, u16 pat_index)
+{
+ return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+}
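To make the flags/address split concrete, a DPT-style caller would presumably assemble a full PTE along these lines (sketch only; offset is a placeholder):

/* Sketch: a complete GGTT PTE is the encoded flags OR'ed with a page address. */
u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, pat_index) |
	  xe_bo_addr(bo, offset, XE_PAGE_SIZE);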
+
+/**
+ * xe_ggtt_read_pte - Read a PTE from the GGTT
+ * @ggtt: &xe_ggtt
+ * @offset: the offset for which the mapping should be read.
+ *
+ * Used by test cases, and by display code reading out an inherited BIOS FB.
+ */
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
+{
+ return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
+}
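A minimal KUnit-flavoured sketch combining the two new test hooks; the fixture setup (a fake GGTT with a mapped gsm) is assumed to be provided by the suite and is not shown here:

/* Hypothetical test body using xe_ggtt_init_kunit() and xe_ggtt_read_pte(). */
static void ggtt_pte_readback_example(struct kunit *test)
{
	struct xe_ggtt *ggtt = test->priv;	/* assumed prepared by the suite */

	KUNIT_ASSERT_EQ(test, xe_ggtt_init_kunit(ggtt, SZ_64K, SZ_1M), 0);
	/* Nothing is mapped yet, so this reads back whatever the fake gsm holds. */
	kunit_info(test, "pte @64K: %#llx\n", xe_ggtt_read_pte(ggtt, SZ_64K));
}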
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 27e7d67de004..fbe1e397d05d 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -9,22 +9,28 @@
#include "xe_ggtt_types.h"
struct drm_printer;
+struct xe_tile;
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size);
int xe_ggtt_init(struct xe_ggtt *ggtt);
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt);
void xe_ggtt_node_fini(struct xe_ggtt_node *node);
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node,
- u64 start, u64 size);
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node);
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node,
+ u64 start, u64 size);
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node);
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift);
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
u32 size, u32 align, u32 mm_flags);
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index);
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end);
@@ -38,4 +44,14 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
#endif
+#ifndef CONFIG_LOCKDEP
+static inline void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{ }
+#else
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt);
+#endif
+
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt, struct xe_bo *bo, u16 pat_index);
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index cb02b7994a9a..c5e999d58ff2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -74,8 +74,8 @@ struct xe_ggtt_node {
* Which can vary from platform to platform.
*/
struct xe_ggtt_pt_ops {
- /** @pte_encode_bo: Encode PTE address for a given BO */
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+ /** @pte_encode_flags: Encode PTE flags for a given BO */
+ u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
/** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 0bcf97063ff6..1d84bf2f2cef 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -59,7 +59,8 @@ static int memcpy_fw(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
- xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size);
+ xe_map_memset(xe, &gsc->private->vmap, fw_size, 0,
+ xe_bo_size(gsc->private) - fw_size);
kfree(storage);
@@ -82,7 +83,8 @@ static int emit_gsc_upload(struct xe_gsc *gsc)
bb->cs[bb->len++] = GSC_FW_LOAD;
bb->cs[bb->len++] = lower_32_bits(offset);
bb->cs[bb->len++] = upper_32_bits(offset);
- bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID;
+ bb->cs[bb->len++] = (xe_bo_size(gsc->private) / SZ_4K) |
+ GSC_FW_LOAD_LIMIT_VALID;
job = xe_bb_create_job(gsc->q, bb);
if (IS_ERR(job)) {
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index d0519cd6704a..464282a89eef 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -23,6 +23,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_tile.h"
/*
* GSC proxy:
@@ -483,7 +484,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
}
/* no multi-tile devices with this feature yet */
- if (tile->id > 0) {
+ if (!xe_tile_is_root(tile)) {
xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index e3517ce2e18c..c8eda36546d3 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -112,7 +112,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
if (!fw_ref)
return;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
@@ -146,30 +146,23 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
static void gt_reset_worker(struct work_struct *w);
-static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
+ long timeout_jiffies)
{
struct xe_sched_job *job;
- struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
- bb = xe_bb_new(gt, 4, false);
- if (IS_ERR(bb))
- return PTR_ERR(bb);
-
job = xe_bb_create_job(q, bb);
- if (IS_ERR(job)) {
- xe_bb_free(bb, NULL);
+ if (IS_ERR(job))
return PTR_ERR(job);
- }
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
+ timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
dma_fence_put(fence);
- xe_bb_free(bb, NULL);
if (timeout < 0)
return timeout;
else if (!timeout)
@@ -178,27 +171,30 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
+static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+{
+ struct xe_bb *bb;
+ int ret;
+
+ bb = xe_bb_new(gt, 4, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ ret = emit_job_sync(q, bb, HZ);
+ xe_bb_free(bb, NULL);
+
+ return ret;
+}
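Any future caller of the factored-out helper would follow the same shape (sketch; dw_count is a placeholder for the batch size):

/* Sketch: build a batch, then let emit_job_sync() arm, push and wait. */
struct xe_bb *bb = xe_bb_new(gt, dw_count, false);
int ret;

if (IS_ERR(bb))
	return PTR_ERR(bb);
/* ... fill bb->cs and bb->len ... */
ret = emit_job_sync(q, bb, 5 * HZ);	/* timeout in jiffies */
xe_bb_free(bb, NULL);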
+
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
struct xe_reg_sr_entry *entry;
+ int count_rmw = 0, count = 0, ret;
unsigned long idx;
- struct xe_sched_job *job;
struct xe_bb *bb;
- struct dma_fence *fence;
- long timeout;
- int count_rmw = 0;
- int count = 0;
-
- if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
- /* Big enough to emit all of the context's 3DSTATE */
- bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
- else
- /* Just pick a large BB size */
- bb = xe_bb_new(gt, SZ_4K, false);
-
- if (IS_ERR(bb))
- return PTR_ERR(bb);
+ size_t bb_len = 0;
+ u32 *cs;
/* count RMW registers as those will be handled separately */
xa_for_each(&sr->xa, idx, entry) {
@@ -208,13 +204,34 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
++count_rmw;
}
- if (count || count_rmw)
- xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ if (count)
+ bb_len += count * 2 + 1;
+
+ if (count_rmw)
+ bb_len += count_rmw * 20 + 7;
+
+ if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
+ /*
+ * Big enough to emit all of the context's 3DSTATE via
+ * xe_lrc_emit_hwe_state_instructions()
+ */
+ bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);
+
+ xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);
+
+ bb = xe_bb_new(gt, bb_len, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ cs = bb->cs;
if (count) {
- /* emit single LRI with all non RMW regs */
+ /*
+		 * Emit single LRI with all non-RMW regs: 1 leading dw + 2 dw
+		 * per reg, i.e. count * 2 + 1 dwords total
+ */
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
@@ -229,79 +246,68 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
val |= entry->set_bits;
- bb->cs[bb->len++] = reg.addr;
- bb->cs[bb->len++] = val;
+ *cs++ = reg.addr;
+ *cs++ = val;
xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
}
}
if (count_rmw) {
- /* emit MI_MATH for each RMW reg */
+ /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */
xa_for_each(&sr->xa, idx, entry) {
if (entry->reg.masked || entry->clr_bits == ~0)
continue;
- bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
- bb->cs[bb->len++] = entry->reg.addr;
- bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
-
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
- MI_LRI_LRM_CS_MMIO;
- bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
- bb->cs[bb->len++] = entry->clr_bits;
- bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
- bb->cs[bb->len++] = entry->set_bits;
-
- bb->cs[bb->len++] = MI_MATH(8);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
- bb->cs[bb->len++] = CS_ALU_INSTR_AND;
- bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
- bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
- bb->cs[bb->len++] = CS_ALU_INSTR_OR;
- bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
-
- bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
- bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
- bb->cs[bb->len++] = entry->reg.addr;
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ *cs++ = entry->reg.addr;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 1).addr;
+ *cs++ = entry->clr_bits;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = entry->set_bits;
+
+ *cs++ = MI_MATH(8);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ *cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ *cs++ = CS_ALU_INSTR_AND;
+ *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ *cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ *cs++ = CS_ALU_INSTR_OR;
+ *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = entry->reg.addr;
xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
entry->reg.addr, entry->clr_bits, entry->set_bits);
}
/* reset used GPR */
- bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
- bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
- bb->cs[bb->len++] = 0;
+ *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = CS_GPR_REG(0, 0).addr;
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 1).addr;
+ *cs++ = 0;
+ *cs++ = CS_GPR_REG(0, 2).addr;
+ *cs++ = 0;
}
- xe_lrc_emit_hwe_state_instructions(q, bb);
+ cs = xe_lrc_emit_hwe_state_instructions(q, cs);
- job = xe_bb_create_job(q, bb);
- if (IS_ERR(job)) {
- xe_bb_free(bb, NULL);
- return PTR_ERR(job);
- }
+ bb->len = cs - bb->cs;
- xe_sched_job_arm(job);
- fence = dma_fence_get(&job->drm.s_fence->finished);
- xe_sched_job_push(job);
+ ret = emit_job_sync(q, bb, HZ);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
- dma_fence_put(fence);
xe_bb_free(bb, NULL);
- if (timeout < 0)
- return timeout;
- else if (!timeout)
- return -ETIME;
- return 0;
+ return ret;
}
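Worked sizing example for the batch above, using illustrative counts:

/* With count = 4 plain regs and count_rmw = 2 RMW regs on a non-render engine:
 *   LRI block:   4 * 2 + 1  =  9 dwords
 *   RMW blocks:  2 * 20 + 7 = 47 dwords
 *   bb_len       9 + 47     = 56 dwords
 * Render engines additionally reserve xe_gt_lrc_size() / 4 dwords for 3DSTATE.
 */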
int xe_gt_record_default_lrcs(struct xe_gt *gt)
@@ -363,14 +369,6 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
goto put_nop_q;
}
- /* Reload golden LRC to record the effect of any indirect W/A */
- err = emit_nop_job(gt, q);
- if (err) {
- xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
- hwe->name, ERR_PTR(err), q->guc->id);
- goto put_nop_q;
- }
-
xe_map_memcpy_from(xe, default_lrc,
&q->lrc[0]->bo->vmap,
xe_lrc_pphwsp_offset(q->lrc[0]),
@@ -390,6 +388,7 @@ put_exec_queue:
int xe_gt_init_early(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -419,6 +418,25 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_mocs_init_early(gt);
+ /*
+ * Only after this point can GT-specific MMIO operations
+ * (including things like communication with the GuC)
+ * be performed.
+ */
+ xe_gt_mmio_init(gt);
+
+ err = xe_uc_init_noalloc(&gt->uc);
+ if (err)
+ return err;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ xe_gt_mcr_init_early(gt);
+ xe_pat_init(gt);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
return 0;
}
@@ -433,7 +451,7 @@ static void dump_pat_on_error(struct xe_gt *gt)
xe_pat_dump(gt, &p);
}
-static int gt_fw_domain_init(struct xe_gt *gt)
+static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
unsigned int fw_ref;
int err;
@@ -442,7 +460,15 @@ static int gt_fw_domain_init(struct xe_gt *gt)
if (!fw_ref)
return -ETIMEDOUT;
- if (!xe_gt_is_media_type(gt)) {
+ err = xe_uc_init(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ xe_gt_topology_init(gt);
+ xe_gt_mcr_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
+
+ if (xe_gt_is_main_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
if (err)
goto err_force_wake;
@@ -457,8 +483,10 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_gt_mcr_init(gt);
err = xe_hw_engines_init_early(gt);
- if (err)
+ if (err) {
+ dump_pat_on_error(gt);
goto err_force_wake;
+ }
err = xe_hw_engine_class_sysfs_init(gt);
if (err)
@@ -479,13 +507,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
return 0;
err_force_wake:
- dump_pat_on_error(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
-static int all_fw_domain_init(struct xe_gt *gt)
+static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
unsigned int fw_ref;
int err;
@@ -518,7 +545,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_force_wake;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
/*
* USM has its own SA pool so it does not block behind user operations
*/
@@ -534,7 +561,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
struct xe_tile *tile = gt_to_tile(gt);
tile->migrate = xe_migrate_init(tile);
@@ -544,7 +571,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
goto err_force_wake;
@@ -554,7 +581,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
xe_gt_apply_ccs_mode(gt);
}
- if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
if (IS_SRIOV_PF(gt_to_xe(gt))) {
@@ -572,39 +599,6 @@ err_force_wake:
return err;
}
-/*
- * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
- * enable CTB communication.
- */
-int xe_gt_init_hwconfig(struct xe_gt *gt)
-{
- unsigned int fw_ref;
- int err;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return -ETIMEDOUT;
-
- xe_gt_mcr_init_early(gt);
- xe_pat_init(gt);
-
- err = xe_uc_init(&gt->uc);
- if (err)
- goto out_fw;
-
- err = xe_uc_init_hwconfig(&gt->uc);
- if (err)
- goto out_fw;
-
- xe_gt_topology_init(gt);
- xe_gt_mcr_init(gt);
- xe_gt_enable_host_l2_vram(gt);
-
-out_fw:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return err;
-}
-
static void xe_gt_fini(void *arg)
{
struct xe_gt *gt = arg;
@@ -636,7 +630,7 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = gt_fw_domain_init(gt);
+ err = gt_init_with_gt_forcewake(gt);
if (err)
return err;
@@ -654,7 +648,7 @@ int xe_gt_init(struct xe_gt *gt)
xe_force_wake_init_engines(gt, gt_to_fw(gt));
- err = all_fw_domain_init(gt);
+ err = gt_init_with_all_forcewake(gt);
if (err)
return err;
@@ -742,7 +736,7 @@ static int vf_gt_restart(struct xe_gt *gt)
if (err)
return err;
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
return err;
@@ -780,11 +774,11 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- err = xe_uc_init_hw(&gt->uc);
+ err = xe_uc_load_hw(&gt->uc);
if (err)
return err;
- if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
if (IS_SRIOV_PF(gt_to_xe(gt)))
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 6357325f3939..41880979f4de 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -28,7 +28,6 @@ static inline bool xe_fault_inject_gt_reset(void)
}
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
-int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
@@ -107,6 +106,11 @@ static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
xe_device_uc_enabled(gt_to_xe(gt));
}
+static inline bool xe_gt_is_main_type(struct xe_gt *gt)
+{
+ return gt->info.type == XE_GT_TYPE_MAIN;
+}
+
static inline bool xe_gt_is_media_type(struct xe_gt *gt)
{
return gt->info.type == XE_GT_TYPE_MEDIA;
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 119a55bb7580..848618acdca8 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -122,24 +122,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
return ret;
}
-static int force_reset(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int sa_info(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -306,8 +288,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
- {"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
@@ -332,6 +312,78 @@ static const struct drm_info_list pf_only_debugfs_list[] = {
{"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
};
+static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos,
+ void (*call)(struct xe_gt *), struct xe_gt *gt)
+{
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes)
+ call(gt);
+ return count;
+}
+
+static void force_reset(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset_async(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset, gt);
+}
+
+static int force_reset_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset);
+
+static void force_reset_sync(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_sync_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset_sync, gt);
+}
+
+static int force_reset_sync_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset_sync(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset_sync);
+
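The new write handlers accept any kstrtobool-compatible input; a hypothetical userspace trigger (debugfs path illustrative) could look like:

/* Hypothetical: write "1" (or "y"/"Y") to fire the reset; "0" is a no-op. */
#include <fcntl.h>
#include <unistd.h>

static int trigger_gt_reset(const char *path)
{
	int fd = open(path, O_WRONLY);	/* e.g. .../dri/<dev>/gt0/force_reset */

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}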
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -355,6 +407,10 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
+ /* VF safe */
+ debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
+ debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
+
drm_debugfs_create_files(vf_safe_debugfs_list,
ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index c11206410a4d..ffb210216aa9 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -121,7 +121,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (vcs_mask || vecs_mask)
gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;
- if (!xe_gt_is_media_type(gt))
+ if (xe_gt_is_main_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
if (xe->info.platform != XE_DG1) {
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index d4d9730f0d2c..64a2f0d6aaf9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -420,12 +420,6 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
-static void init_steering_inst0(struct xe_gt *gt)
-{
- gt->steering[INSTANCE0].group_target = 0; /* unused */
- gt->steering[INSTANCE0].instance_target = 0; /* unused */
-}
-
static const struct {
const char *name;
void (*init)(struct xe_gt *gt);
@@ -436,7 +430,7 @@ static const struct {
[DSS] = { "DSS", init_steering_dss },
[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
- [INSTANCE0] = { "INSTANCE 0", init_steering_inst0 },
+ [INSTANCE0] = { "INSTANCE 0", NULL },
[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};
@@ -446,25 +440,17 @@ static const struct {
*
* Perform early software only initialization of the MCR lock to allow
* the synchronization on accessing the STEER_SEMAPHORE register and
- * use the xe_gt_mcr_multicast_write() function.
+ * use the xe_gt_mcr_multicast_write() function, plus set up the minimum
+ * steering needed to safely access the MCR registers used for VRAM/CCS probing.
*/
void xe_gt_mcr_init_early(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
spin_lock_init(&gt->mcr_lock);
-}
-
-/**
- * xe_gt_mcr_init - Normal initialization of the MCR support
- * @gt: GT structure
- *
- * Perform normal initialization of the MCR for all usages.
- */
-void xe_gt_mcr_init(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
if (IS_SRIOV_VF(xe))
return;
@@ -505,10 +491,27 @@ void xe_gt_mcr_init(struct xe_gt *gt)
}
}
+	/* Mark instance 0 as initialized; we need this early for VRAM and CCS probing. */
+ gt->steering[INSTANCE0].initialized = true;
+}
+
+/**
+ * xe_gt_mcr_init - Normal initialization of the MCR support
+ * @gt: GT structure
+ *
+ * Perform normal initialization of the MCR for all usages.
+ */
+void xe_gt_mcr_init(struct xe_gt *gt)
+{
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
/* Select non-terminated steering target for each type */
- for (int i = 0; i < NUM_STEERING_TYPES; i++)
+ for (int i = 0; i < NUM_STEERING_TYPES; i++) {
+ gt->steering[i].initialized = true;
if (gt->steering[i].ranges && xe_steering_types[i].init)
xe_steering_types[i].init(gt);
+ }
}
/**
@@ -570,6 +573,10 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) {
+ drm_WARN(&gt_to_xe(gt)->drm, !gt->steering[type].initialized,
+ "Uninitialized usage of MCR register %s/%#x\n",
+ xe_steering_types[type].name, reg.addr);
+
*group = gt->steering[type].group_target;
*instance = gt->steering[type].instance_target;
return true;
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 6717a636b1d9..5a75d56d8558 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -14,6 +14,7 @@
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
@@ -68,31 +69,8 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
- return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->tile_invalidated);
-}
-
-static bool vma_matches(struct xe_vma *vma, u64 page_addr)
-{
- if (page_addr > xe_vma_end(vma) - 1 ||
- page_addr + SZ_4K - 1 < xe_vma_start(vma))
- return false;
-
- return true;
-}
-
-static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
-{
- struct xe_vma *vma = NULL;
-
- if (vm->usm.last_fault_vma) { /* Fast lookup */
- if (vma_matches(vm->usm.last_fault_vma, page_addr))
- vma = vm->usm.last_fault_vma;
- }
- if (!vma)
- vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
-
- return vma;
+ return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+ vma->tile_invalidated);
}
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
@@ -143,7 +121,7 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
trace_xe_vma_pagefault(vma);
- /* Check if VMA is valid */
+ /* Check if VMA is valid, opportunistic check only */
if (vma_is_valid(tile, vma) && !atomic)
return 0;
@@ -180,7 +158,6 @@ retry_userptr:
dma_fence_wait(fence, false);
dma_fence_put(fence);
- vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
@@ -231,7 +208,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- vma = lookup_vma(vm, pf->page_addr);
+ vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
goto unlock_vm;
@@ -266,22 +243,22 @@ static int send_pagefault_reply(struct xe_guc *guc,
return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
+static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
- drm_dbg(&xe->drm, "\n\tASID: %d\n"
- "\tVFID: %d\n"
- "\tPDATA: 0x%04x\n"
- "\tFaulted Address: 0x%08x%08x\n"
- "\tFaultType: %d\n"
- "\tAccessType: %d\n"
- "\tFaultLevel: %d\n"
- "\tEngineClass: %d %s\n"
- "\tEngineInstance: %d\n",
- pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
- lower_32_bits(pf->page_addr),
- pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
- pf->engine_instance);
+ xe_gt_dbg(gt, "\n\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tPDATA: 0x%04x\n"
+ "\tFaulted Address: 0x%08x%08x\n"
+ "\tFaultType: %d\n"
+ "\tAccessType: %d\n"
+ "\tFaultLevel: %d\n"
+ "\tEngineClass: %d %s\n"
+ "\tEngineInstance: %d\n",
+ pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
+ lower_32_bits(pf->page_addr),
+ pf->fault_type, pf->access_type, pf->fault_level,
+ pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+ pf->engine_instance);
}
#define PF_MSG_LEN_DW 4
@@ -333,7 +310,6 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
struct pf_queue *pf_queue;
unsigned long flags;
u32 asid;
@@ -358,7 +334,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
- drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
+ xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
}
spin_unlock_irqrestore(&pf_queue->lock, flags);
@@ -371,7 +347,6 @@ static void pf_queue_work_func(struct work_struct *w)
{
struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
struct xe_gt *gt = pf_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct xe_guc_pagefault_reply reply = {};
struct pagefault pf = {};
unsigned long threshold;
@@ -382,9 +357,9 @@ static void pf_queue_work_func(struct work_struct *w)
while (get_pagefault(pf_queue, &pf)) {
ret = handle_pagefault(gt, &pf);
if (unlikely(ret)) {
- print_pagefault(xe, &pf);
+ print_pagefault(gt, &pf);
pf.fault_unsuccessful = 1;
- drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
+ xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
}
reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
@@ -538,21 +513,21 @@ static int sub_granularity_in_byte(int val)
return (granularity_in_byte(val) / 32);
}
-static void print_acc(struct xe_device *xe, struct acc *acc)
+static void print_acc(struct xe_gt *gt, struct acc *acc)
{
- drm_warn(&xe->drm, "Access counter request:\n"
- "\tType: %s\n"
- "\tASID: %d\n"
- "\tVFID: %d\n"
- "\tEngine: %d:%d\n"
- "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
- "\tSub_Granularity Vector: 0x%08x\n"
- "\tVA Range base: 0x%016llx\n",
- acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
- acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
- granularity_in_byte(acc->granularity) / SZ_1K,
- sub_granularity_in_byte(acc->granularity) / SZ_1K,
- acc->sub_granularity, acc->va_range_base);
+ xe_gt_warn(gt, "Access counter request:\n"
+ "\tType: %s\n"
+ "\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tEngine: %d:%d\n"
+ "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
+ "\tSub_Granularity Vector: 0x%08x\n"
+ "\tVA Range base: 0x%016llx\n",
+ acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
+ acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
+ granularity_in_byte(acc->granularity) / SZ_1K,
+ sub_granularity_in_byte(acc->granularity) / SZ_1K,
+ acc->sub_granularity, acc->va_range_base);
}
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
@@ -650,7 +625,6 @@ static void acc_queue_work_func(struct work_struct *w)
{
struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
struct xe_gt *gt = acc_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct acc acc = {};
unsigned long threshold;
int ret;
@@ -660,8 +634,8 @@ static void acc_queue_work_func(struct work_struct *w)
while (get_acc(acc_queue, &acc)) {
ret = handle_acc(gt, &acc);
if (unlikely(ret)) {
- print_acc(xe, &acc);
- drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
+ print_acc(gt, &acc);
+ xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
}
if (time_after(jiffies, threshold) &&
@@ -706,7 +680,7 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
queue_work(gt->usm.acc_wq, &acc_queue->worker);
} else {
- drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
+ xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
}
spin_unlock(&acc_queue->lock);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 53a44702c04a..494909f74eb2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -104,13 +104,13 @@ static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
}
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
void *klvs = xe_guc_buf_cpu_ptr(buf);
char name[8];
- xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
- xe_sriov_function_name(vfid, name, sizeof(name)),
- num_klvs, str_plural(num_klvs));
+ xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs));
xe_guc_klv_print(klvs, num_dwords, &p);
}
@@ -238,26 +238,35 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i
}
/* Return: number of configuration dwords written */
-static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
u32 n = 0;
- if (xe_ggtt_node_allocated(config->ggtt_region)) {
- if (details) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
- }
-
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(start);
+ cfg[n++] = upper_32_bits(start);
}
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
+ cfg[n++] = lower_32_bits(size);
+ cfg[n++] = upper_32_bits(size);
+
return n;
}
/* Return: number of configuration dwords written */
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+{
+ struct xe_ggtt_node *node = config->ggtt_region;
+
+ if (!xe_ggtt_node_allocated(node))
+ return 0;
+
+ return encode_ggtt(cfg, node->base.start, node->base.size, details);
+}
+
+/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
@@ -282,8 +291,8 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
if (config->lmem_obj) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
- cfg[n++] = lower_32_bits(config->lmem_obj->size);
- cfg[n++] = upper_32_bits(config->lmem_obj->size);
+ cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
+ cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
}
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
@@ -332,6 +341,17 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+ if (vfid == PFID) {
+ u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;
+
+ /* plain PF config data will never include a real GGTT region */
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
+
+ /* fake PF GGTT config covers full GGTT range except reserved WOPCM */
+ num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
+ }
+
num_klvs = xe_guc_klv_count(cfg, num_dwords);
err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
@@ -376,7 +396,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
u64 spare;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -388,7 +408,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -443,7 +463,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
size = round_up(size, alignment);
@@ -492,7 +512,7 @@ static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_ggtt_node *node = config->ggtt_region;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}
@@ -560,7 +580,7 @@ int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
@@ -622,7 +642,7 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -693,7 +713,7 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
fair = pf_estimate_fair_ggtt(gt, num_vfs);
@@ -1299,7 +1319,7 @@ static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
struct xe_bo *bo;
bo = config->lmem_obj;
- return bo ? bo->size : 0;
+ return bo ? xe_bo_size(bo) : 0;
}
static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1327,7 +1347,17 @@ static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 si
static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
- /* TODO */
+ struct xe_lmtt *lmtt;
+ struct xe_tile *tile;
+ unsigned int tid;
+
+ xe_assert(xe, xe_device_has_lmtt(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_invalidate_hw(lmtt);
+ }
}
static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
@@ -1388,7 +1418,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
if (err)
goto fail;
- offset += bo->size;
+ offset += xe_bo_size(bo);
}
}
@@ -1406,7 +1436,7 @@ fail:
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
if (config->lmem_obj) {
@@ -1425,7 +1455,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, IS_DGFX(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1469,12 +1499,12 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
goto release;
}
- err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
+ err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
if (unlikely(err))
goto reset_lmtt;
xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
- vfid, bo->size, bo->size / SZ_1M);
+ vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
return 0;
reset_lmtt:
@@ -1520,6 +1550,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
+ xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
err = pf_provision_vf_lmem(gt, vfid, size);
@@ -1550,7 +1582,7 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -1627,9 +1659,9 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
return 0;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
@@ -1661,7 +1693,7 @@ int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
result = result ?: err;
err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
@@ -1989,7 +2021,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
pf_release_vf_config_lmem(gt, config);
@@ -2080,7 +2112,7 @@ static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
 * Only GGTT and LMEM need to be cleared by the PF.
* GuC doorbell IDs and context IDs do not need any clearing.
*/
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_sanitize_ggtt(config->ggtt_region, vfid);
if (IS_DGFX(xe))
err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
@@ -2147,7 +2179,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool is_primary = !xe_gt_is_media_type(gt);
+ bool is_primary = xe_gt_is_main_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -2163,7 +2195,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_all = valid_all && valid_ggtt;
valid_any = valid_any || (valid_ggtt && is_primary);
- if (IS_DGFX(xe)) {
+ if (xe_device_has_lmtt(xe)) {
bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
@@ -2347,7 +2379,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return -EINVAL;
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
drm_printf(&p, "restoring VF%u config:\n", vfid);
xe_guc_klv_print(buf, size / sizeof(u32), &p);
@@ -2364,6 +2396,20 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static void pf_prepare_self_config(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
+
+ /*
+	 * We want the PF to be allowed to use all context IDs, all doorbell
+	 * IDs, and the whole usable GGTT area. While we can store the ctxs/dbs
+	 * numbers directly in the config structure, we can't do the same with
+	 * the GGTT configuration, so it is prepared on demand while pushing KLVs.
+ */
+ config->num_ctxs = GUC_ID_MAX;
+ config->num_dbs = GUC_NUM_DOORBELLS;
+}
+
static int pf_push_self_config(struct xe_gt *gt)
{
int err;
@@ -2407,6 +2453,7 @@ int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_prepare_self_config(gt);
err = pf_push_self_config(gt);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
@@ -2577,10 +2624,10 @@ int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
if (!config->lmem_obj)
continue;
- string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
buf, sizeof(buf));
drm_printf(p, "VF%u:\t%zu\t(%s)\n",
- n, config->lmem_obj->size, buf);
+ n, xe_bo_size(config->lmem_obj), buf);
}
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
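
Editor's note: the refactored encode_ggtt()/encode_config_ggtt() split above packs 64-bit GGTT values into 32-bit KLV dwords. A minimal standalone sketch of that packing follows; the key values and the key|len dword layout are illustrative assumptions standing in for PREP_GUC_KLV_TAG().

#include <stdio.h>
#include <stdint.h>

/* illustrative key values; the real ones come from PREP_GUC_KLV_TAG() */
#define VF_CFG_GGTT_START_KEY	0x0001u
#define VF_CFG_GGTT_SIZE_KEY	0x0002u
#define PREP_KLV(key, len)	((key) << 16 | (len))	/* assumed key|len layout */

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* mirrors encode_ggtt(): returns the number of dwords written */
static uint32_t encode_ggtt(uint32_t *cfg, uint64_t start, uint64_t size, int details)
{
	uint32_t n = 0;

	if (details) {
		cfg[n++] = PREP_KLV(VF_CFG_GGTT_START_KEY, 2);
		cfg[n++] = lower_32_bits(start);
		cfg[n++] = upper_32_bits(start);
	}

	cfg[n++] = PREP_KLV(VF_CFG_GGTT_SIZE_KEY, 2);
	cfg[n++] = lower_32_bits(size);
	cfg[n++] = upper_32_bits(size);

	return n;
}

int main(void)
{
	uint32_t cfg[6];
	uint32_t n = encode_ggtt(cfg, 0x200000ull, 0x40000000ull, 1);

	for (uint32_t i = 0; i < n; i++)
		printf("dw%u: 0x%08x\n", (unsigned)i, (unsigned)cfg[i]);
	return 0;
}

Splitting the node lookup (encode_config_ggtt) from the raw encoder (encode_ggtt) is what lets the PF path above synthesize a "fake" GGTT KLV covering the whole range minus WOPCM without a backing node.
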
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 1f50aec3a059..4f7fff892bc0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -15,10 +15,11 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
-#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf_service.h"
+#include "xe_tile.h"
static const char *control_cmd_to_string(u32 cmd)
{
@@ -1064,7 +1065,9 @@ static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
return false;
- xe_gt_sriov_pf_service_reset(gt, vfid);
+ if (xe_tile_is_root(gt->tile) && xe_gt_is_main_type(gt))
+ xe_sriov_pf_service_reset_vf(gt_to_xe(gt), vfid);
+
xe_gt_sriov_pf_monitor_flr(gt, vfid);
pf_enter_vf_flr_reset_mmio(gt, vfid);
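
Editor's note: the hunk above gates the now device-level service reset so it runs once per device rather than once per GT. A small sketch of that "root tile, main GT only" gating pattern, with a hypothetical struct standing in for the real xe_gt:

#include <stdbool.h>
#include <stdio.h>

/* per-GT FLR handling fires on every GT, but the device-level service
 * reset must run exactly once: from the root tile's main GT */
struct gt { int tile_id; bool is_main; };

static bool runs_device_level_reset(const struct gt *gt)
{
	return gt->tile_id == 0 && gt->is_main;
}

int main(void)
{
	struct gt gts[] = { {0, true}, {0, false}, {1, true}, {1, false} };

	for (unsigned i = 0; i < sizeof(gts) / sizeof(gts[0]); i++)
		printf("gt[%u]: %s\n", i,
		       runs_device_level_reset(&gts[i]) ?
		       "reset device-level VF service" : "skip");
	return 0;
}
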
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 0fe47f41b63c..bf679b21f485 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -78,11 +78,6 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_service_print_runtime,
},
{
- "negotiated_versions",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_service_print_version,
- },
- {
"adverse_events",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_monitor_print_events,
@@ -305,10 +300,10 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
xe_gt_assert(gt, gt == extract_gt(parent));
xe_gt_assert(gt, vfid == extract_vfid(parent));
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
0644, parent, parent, &ggtt_fops);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
0644, parent, parent, &lmem_fops);
}
@@ -554,11 +549,11 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
drm_debugfs_create_files(pf_ggtt_info,
ARRAY_SIZE(pf_ggtt_info),
pfdentry, minor);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
drm_debugfs_create_files(pf_lmem_info,
ARRAY_SIZE(pf_lmem_info),
pfdentry, minor);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 821cfcc34e6b..76dd9233ef9f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -19,91 +19,7 @@
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
-
-static void pf_init_versions(struct xe_gt *gt)
-{
- BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
- BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
-
- /* base versions may differ between platforms */
- gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
- gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
-
- /* latest version is same for all platforms */
- gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
- gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
-}
-
-/* Return: 0 on success or a negative error code on failure. */
-static int pf_negotiate_version(struct xe_gt *gt,
- u32 wanted_major, u32 wanted_minor,
- u32 *major, u32 *minor)
-{
- struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
- struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;
-
- xe_gt_assert(gt, base.major);
- xe_gt_assert(gt, base.major <= latest.major);
- xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));
-
- /* VF doesn't care - return our latest */
- if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
- wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
- *major = latest.major;
- *minor = latest.minor;
- return 0;
- }
-
- /* VF wants newer than our - return our latest */
- if (wanted_major > latest.major) {
- *major = latest.major;
- *minor = latest.minor;
- return 0;
- }
-
- /* VF wants older than min required - reject */
- if (wanted_major < base.major ||
- (wanted_major == base.major && wanted_minor < base.minor)) {
- return -EPERM;
- }
-
- /* previous major - return wanted, as we should still support it */
- if (wanted_major < latest.major) {
- /* XXX: we are not prepared for multi-versions yet */
- xe_gt_assert(gt, base.major == latest.major);
- return -ENOPKG;
- }
-
- /* same major - return common minor */
- *major = wanted_major;
- *minor = min_t(u32, latest.minor, wanted_minor);
- return 0;
-}
-
-static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
- xe_gt_assert(gt, major || minor);
-
- gt->sriov.pf.vfs[vfid].version.major = major;
- gt->sriov.pf.vfs[vfid].version.minor = minor;
-}
-
-static void pf_disconnect(struct xe_gt *gt, u32 vfid)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
-
- gt->sriov.pf.vfs[vfid].version.major = 0;
- gt->sriov.pf.vfs[vfid].version.minor = 0;
-}
-
-static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
-{
- xe_gt_sriov_pf_assert_vfid(gt, vfid);
-
- return major == gt->sriov.pf.vfs[vfid].version.major &&
- minor <= gt->sriov.pf.vfs[vfid].version.minor;
-}
+#include "xe_sriov_pf_service.h"
static const struct xe_reg tgl_runtime_regs[] = {
RPM_CONFIG0, /* _MMIO(0x0d00) */
@@ -266,7 +182,7 @@ static void pf_prepare_runtime_info(struct xe_gt *gt)
read_many(gt, size, regs, values);
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
xe_gt_sriov_pf_service_print_runtime(gt, &p);
}
@@ -285,8 +201,6 @@ int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
int err;
- pf_init_versions(gt);
-
err = pf_alloc_runtime_info(gt);
if (unlikely(err))
goto failed;
@@ -311,47 +225,6 @@ void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
pf_prepare_runtime_info(gt);
}
-/**
- * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
- * @gt: the &xe_gt
- * @vfid: the VF identifier
- *
- * Reset a VF driver negotiated VF/PF ABI version.
- * After that point, the VF driver will have to perform new version handshake
- * to continue use of the PF services again.
- *
- * This function can only be called on PF.
- */
-void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
-{
- pf_disconnect(gt, vfid);
-}
-
-/* Return: 0 on success or a negative error code on failure. */
-static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
- u32 wanted_major, u32 wanted_minor,
- u32 *major, u32 *minor)
-{
- int err;
-
- xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
- vfid, wanted_major, wanted_minor);
-
- err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);
-
- if (err < 0) {
- xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
- vfid, wanted_major, wanted_minor, ERR_PTR(err));
- pf_disconnect(gt, vfid);
- } else {
- xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
- vfid, *major, *minor);
- pf_connect(gt, vfid, *major, *minor);
- }
-
- return 0;
-}
-
/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
const u32 *request, u32 len, u32 *response, u32 size)
@@ -371,7 +244,8 @@ static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);
- err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
+ err = xe_sriov_pf_service_handshake_vf(gt_to_xe(gt), origin, wanted_major, wanted_minor,
+ &major, &minor);
if (err < 0)
return err;
@@ -430,8 +304,10 @@ static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
u32 remaining = 0;
int ret;
- if (!pf_is_negotiated(gt, origin, 1, 0))
+ /* this action is available from ABI 1.0 */
+ if (!xe_sriov_pf_service_is_negotiated(gt_to_xe(gt), origin, 1, 0))
return -EACCES;
+
if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
return -EMSGSIZE;
if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
@@ -528,33 +404,3 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p
return 0;
}
-
-/**
- * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
- * @gt: the &xe_gt
- * @p: the &drm_printer
- *
- * This function is for PF use only.
- */
-int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
-{
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
- struct xe_gt_sriov_pf_service_version *version;
-
- xe_gt_assert(gt, IS_SRIOV_PF(xe));
-
- for (n = 1; n <= total_vfs; n++) {
- version = &gt->sriov.pf.vfs[n].version;
- if (!version->major && !version->minor)
- continue;
-
- drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
- }
-
- return 0;
-}
-
-#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
-#include "tests/xe_gt_sriov_pf_service_test.c"
-#endif
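
Editor's note: the version negotiation removed above moves to the device level (xe_sriov_pf_service_handshake_vf). Its rules are easy to lose in the diff, so here is a self-contained C sketch mirroring the removed pf_negotiate_version(); the ANY sentinel value is an assumption (the real code uses the VF2PF_HANDSHAKE_*_ANY constants and tests them before the range checks, so the exact value does not matter here).

#include <errno.h>
#include <stdio.h>

#define ANY 0u	/* assumed sentinel for VF2PF_HANDSHAKE_*_ANY */

struct ver { unsigned major, minor; };

/* mirrors the negotiation rules of the removed pf_negotiate_version() */
static int negotiate(struct ver base, struct ver latest,
		     unsigned wmaj, unsigned wmin, struct ver *out)
{
	if (wmaj == ANY && wmin == ANY) {	/* VF doesn't care: our latest */
		*out = latest;
		return 0;
	}
	if (wmaj > latest.major) {		/* newer than ours: our latest */
		*out = latest;
		return 0;
	}
	if (wmaj < base.major ||		/* older than minimum: reject */
	    (wmaj == base.major && wmin < base.minor))
		return -EPERM;
	if (wmaj < latest.major)		/* multi-major not supported */
		return -ENOPKG;
	out->major = wmaj;			/* same major: common minor */
	out->minor = latest.minor < wmin ? latest.minor : wmin;
	return 0;
}

int main(void)
{
	struct ver base = {1, 0}, latest = {1, 9}, got;

	if (!negotiate(base, latest, 1, 5, &got))
		printf("negotiated ABI %u.%u\n", got.major, got.minor);
	return 0;
}
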
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
index 56aaadf0360d..10b02c9b651c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
@@ -14,9 +14,7 @@ struct xe_gt;
int xe_gt_sriov_pf_service_init(struct xe_gt *gt);
void xe_gt_sriov_pf_service_update(struct xe_gt *gt);
-void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid);
-int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p);
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index a439261bf4d7..b282838d59e6 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -82,17 +82,17 @@ int xe_gt_sriov_vf_reset(struct xe_gt *gt)
}
static int guc_action_match_version(struct xe_guc *guc,
- u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
- u32 *branch, u32 *major, u32 *minor, u32 *patch)
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
{
u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
GUC_ACTION_VF2GUC_MATCH_VERSION),
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor),
};
u32 response[GUC_MAX_MMIO_MSG_LEN];
int ret;
@@ -106,120 +106,138 @@ static int guc_action_match_version(struct xe_guc *guc,
if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
return -EPROTO;
- *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
- *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
- *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
- *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
+ memset(found, 0, sizeof(struct xe_uc_fw_version));
+ found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
+ found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
+ found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
+ found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
return 0;
}
-static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static int guc_action_match_version_any(struct xe_guc *guc,
+ struct xe_uc_fw_version *found)
+{
+ struct xe_uc_fw_version wanted = {
+ .branch = GUC_VERSION_BRANCH_ANY,
+ .major = GUC_VERSION_MAJOR_ANY,
+ .minor = GUC_VERSION_MINOR_ANY,
+ .patch = 0
+ };
+
+ return guc_action_match_version(guc, &wanted, found);
+}
+
+static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
struct xe_device *xe = gt_to_xe(gt);
+ memset(ver, 0, sizeof(struct xe_uc_fw_version));
+
switch (xe->info.platform) {
case XE_TIGERLAKE ... XE_PVC:
		/* 1.1 is the current baseline for the Xe driver */
- *branch = 0;
- *major = 1;
- *minor = 1;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 1;
break;
default:
/* 1.2 has support for the GMD_ID KLV */
- *branch = 0;
- *major = 1;
- *minor = 2;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 2;
break;
}
}
-static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
/* for now it's the same as minimum */
- return vf_minimum_guc_version(gt, branch, major, minor);
+ return vf_minimum_guc_version(gt, ver);
}
static int vf_handshake_with_guc(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version wanted = {0};
struct xe_guc *guc = &gt->uc.guc;
- u32 wanted_branch, wanted_major, wanted_minor;
- u32 branch, major, minor, patch;
+ bool old = false;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
/* select wanted version - prefer previous (if any) */
if (guc_version->major || guc_version->minor) {
- wanted_branch = guc_version->branch;
- wanted_major = guc_version->major;
- wanted_minor = guc_version->minor;
+ wanted = *guc_version;
+ old = true;
} else {
- vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
+ vf_wanted_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+
+ /* First time we handshake, so record the minimum wanted */
+ gt->sriov.vf.wanted_guc_version = wanted;
}
- err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
- &branch, &major, &minor, &patch);
+ err = guc_action_match_version(guc, &wanted, guc_version);
if (unlikely(err))
goto fail;
- /* we don't support interface version change */
- if ((guc_version->major || guc_version->minor) &&
- (guc_version->branch != branch || guc_version->major != major ||
- guc_version->minor != minor)) {
- xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
- branch, major, minor, patch);
- xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
- guc_version->branch, guc_version->major,
- guc_version->minor, guc_version->patch);
- err = -EREMCHG;
- goto fail;
+ if (old) {
+ /* we don't support interface version change */
+ if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) {
+ xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
+ xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
+ wanted.branch, wanted.major,
+ wanted.minor, wanted.patch);
+ err = -EREMCHG;
+ goto fail;
+ } else {
+ /* version is unchanged, no need to re-verify it */
+ return 0;
+ }
}
/* illegal */
- if (major > wanted_major) {
+ if (guc_version->major > wanted.major) {
err = -EPROTO;
goto unsupported;
}
/* there's no fallback on major version. */
- if (major != wanted_major) {
+ if (guc_version->major != wanted.major) {
err = -ENOPKG;
goto unsupported;
}
/* check against minimum version supported by us */
- vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
- if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
+ vf_minimum_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+ if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) {
err = -ENOKEY;
goto unsupported;
}
xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
- guc_version->branch = branch;
- guc_version->major = major;
- guc_version->minor = minor;
- guc_version->patch = patch;
return 0;
unsupported:
xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
- branch, major, minor, patch, ERR_PTR(err));
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch,
+ ERR_PTR(err));
fail:
xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
- wanted_major, wanted_minor, ERR_PTR(err));
+ wanted.major, wanted.minor, ERR_PTR(err));
/* try again with *any* just to query which version is supported */
- if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
- GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
- &branch, &major, &minor, &patch))
+ if (!guc_action_match_version_any(guc, &wanted))
xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ wanted.branch, wanted.major, wanted.minor, wanted.patch);
return err;
}
@@ -250,6 +268,29 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions
+ * @gt: the &xe_gt
+ * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version
+ * @found: pointer to the xe_uc_fw_version to be filled with the found version
+ *
+ * This function is for VF use only and it can only be used after successful
+ * version handshake with the GuC.
+ */
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
+{
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+
+ if (wanted)
+ *wanted = gt->sriov.vf.wanted_guc_version;
+
+ if (found)
+ *found = gt->sriov.vf.guc_version;
+}
+
static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
@@ -415,6 +456,7 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
+ config->ggtt_shift = start - (s64)config->ggtt_base;
config->ggtt_base = start;
config->ggtt_size = size;
@@ -510,7 +552,7 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
+ if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
err = vf_get_lmem_info(gt);
if (unlikely(err))
return err;
@@ -560,106 +602,56 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
return gt->sriov.vf.self_config.lmem_size;
}
-static struct xe_ggtt_node *
-vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
-{
- struct xe_ggtt_node *node;
- int err;
-
- node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(node))
- return node;
-
- err = xe_ggtt_node_insert_balloon(node, start, end);
- if (err) {
- xe_ggtt_node_fini(node);
- return ERR_PTR(err);
- }
-
- return node;
-}
-
-static int vf_balloon_ggtt(struct xe_gt *gt)
+/**
+ * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to VF.
+ */
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_ggtt *ggtt = tile->mem.ggtt;
- struct xe_device *xe = gt_to_xe(gt);
- u64 start, end;
-
- xe_gt_assert(gt, IS_SRIOV_VF(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
- if (!config->ggtt_size)
- return -ENODATA;
-
- /*
- * VF can only use part of the GGTT as allocated by the PF:
- *
- * WOPCM GUC_GGTT_TOP
- * |<------------ Total GGTT size ------------------>|
- *
- * VF GGTT base -->|<- size ->|
- *
- * +--------------------+----------+-----------------+
- * |////////////////////| block |\\\\\\\\\\\\\\\\\|
- * +--------------------+----------+-----------------+
- *
- * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
- */
-
- start = xe_wopcm_size(xe);
- end = config->ggtt_base;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
- }
-
- start = config->ggtt_base + config->ggtt_size;
- end = GUC_GGTT_TOP;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
- }
- }
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- return 0;
+ return gt->sriov.vf.self_config.ggtt_size;
}
-static void deballoon_ggtt(struct drm_device *drm, void *arg)
+/**
+ * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: base offset of the GGTT assigned to VF.
+ */
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
{
- struct xe_tile *tile = arg;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
+ return gt->sriov.vf.self_config.ggtt_base;
}
/**
- * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
- * @gt: the &xe_gt
+ * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
+ * @gt: the &xe_gt struct instance
*
* This function is for VF use only.
*
- * Return: 0 on success or a negative error code on failure.
+ * Return: The shift value; may be negative.
*/
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (xe_gt_is_media_type(gt))
- return 0;
+ struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- err = vf_balloon_ggtt(gt);
- if (err)
- return err;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
+ return config->ggtt_shift;
}
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
@@ -694,21 +686,22 @@ static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
return 0;
}
-static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
+static void vf_connect_pf(struct xe_device *xe, u16 major, u16 minor)
{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_assert(xe, IS_SRIOV_VF(xe));
- gt->sriov.vf.pf_version.major = major;
- gt->sriov.vf.pf_version.minor = minor;
+ xe->sriov.vf.pf_version.major = major;
+ xe->sriov.vf.pf_version.minor = minor;
}
-static void vf_disconnect_pf(struct xe_gt *gt)
+static void vf_disconnect_pf(struct xe_device *xe)
{
- vf_connect_pf(gt, 0, 0);
+ vf_connect_pf(xe, 0, 0);
}
static int vf_handshake_with_pf(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
u32 major = major_wanted, minor = minor_wanted;
@@ -724,13 +717,13 @@ static int vf_handshake_with_pf(struct xe_gt *gt)
}
xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
- vf_connect_pf(gt, major, minor);
+ vf_connect_pf(xe, major, minor);
return 0;
failed:
xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
major, minor, ERR_PTR(err));
- vf_disconnect_pf(gt);
+ vf_disconnect_pf(xe);
return err;
}
@@ -783,10 +776,12 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ struct xe_device *xe = gt_to_xe(gt);
- return major == gt->sriov.vf.pf_version.major &&
- minor <= gt->sriov.vf.pf_version.minor;
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ return major == xe->sriov.vf.pf_version.major &&
+ minor <= xe->sriov.vf.pf_version.minor;
}
static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
@@ -974,7 +969,6 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
struct vf_runtime_reg *rr;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
xe_gt_assert(gt, !reg.vf);
if (reg.addr == GMD_ID.addr) {
@@ -1043,7 +1037,9 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
- if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
+ drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+
+ if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
}
@@ -1079,19 +1075,21 @@ void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
*/
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
- struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
- u32 branch, major, minor;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *wanted = &gt->sriov.vf.wanted_guc_version;
+ struct xe_sriov_vf_relay_version *pf_version = &xe->sriov.vf.pf_version;
+ struct xe_uc_fw_version ver;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
drm_printf(p, "GuC ABI:\n");
- vf_minimum_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);
+ vf_minimum_guc_version(gt, &ver);
+ drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor);
- vf_wanted_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);
+ drm_printf(p, "\twanted:\t%u.%u.%u.*\n",
+ wanted->branch, wanted->major, wanted->minor);
drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
guc_version->branch, guc_version->major,
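
Editor's note: the handshake rewrite above replaces field-by-field comparisons with MAKE_GUC_VER_STRUCT() on whole xe_uc_fw_version structs. A sketch of the underlying idea follows; the exact packing shown is an assumption matching the common MAKE_GUC_VER(maj, min, pat) pattern, with the branch field compared separately as far as this diff shows.

#include <stdio.h>

struct fw_ver { unsigned branch, major, minor, patch; };

/* assumed shape of MAKE_GUC_VER()/MAKE_GUC_VER_STRUCT(): pack the
 * fields so a plain integer compare orders versions correctly */
static unsigned make_ver(struct fw_ver v)
{
	return v.major << 16 | v.minor << 8 | v.patch;
}

int main(void)
{
	struct fw_ver minimum = {0, 1, 2, 0};	/* wanted/minimum version */
	struct fw_ver found   = {0, 1, 2, 3};	/* version GuC reported */

	if (make_ver(found) < make_ver(minimum))
		printf("GuC interface too old (-ENOKEY path)\n");
	else
		printf("using GuC interface version %u.%u.%u.%u\n",
		       found.branch, found.major, found.minor, found.patch);
	return 0;
}
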
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index ba6c5d74e326..e0357f341a2d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -11,19 +11,26 @@
struct drm_printer;
struct xe_gt;
struct xe_reg;
+struct xe_uc_fw_version;
int xe_gt_sriov_vf_reset(struct xe_gt *gt);
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found);
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index a57f13b5afcd..298dedf4b009 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,30 +7,7 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
-
-/**
- * struct xe_gt_sriov_vf_guc_version - GuC ABI version details.
- */
-struct xe_gt_sriov_vf_guc_version {
- /** @branch: branch version. */
- u8 branch;
- /** @major: major version. */
- u8 major;
- /** @minor: minor version. */
- u8 minor;
- /** @patch: patch version. */
- u8 patch;
-};
-
-/**
- * struct xe_gt_sriov_vf_relay_version - PF ABI version details.
- */
-struct xe_gt_sriov_vf_relay_version {
- /** @major: major version. */
- u16 major;
- /** @minor: minor version. */
- u16 minor;
-};
+#include "xe_uc_fw_types.h"
/**
* struct xe_gt_sriov_vf_selfconfig - VF configuration data.
@@ -40,6 +17,8 @@ struct xe_gt_sriov_vf_selfconfig {
u64 ggtt_base;
/** @ggtt_size: assigned size of the GGTT region. */
u64 ggtt_size;
+ /** @ggtt_shift: difference in ggtt_base on last migration */
+ s64 ggtt_shift;
/** @lmem_size: assigned size of the LMEM. */
u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
@@ -71,12 +50,12 @@ struct xe_gt_sriov_vf_runtime {
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
struct xe_gt_sriov_vf {
+ /** @wanted_guc_version: minimum wanted GuC ABI version. */
+ struct xe_uc_fw_version wanted_guc_version;
/** @guc_version: negotiated GuC ABI version. */
- struct xe_gt_sriov_vf_guc_version guc_version;
+ struct xe_uc_fw_version guc_version;
/** @self_config: resource configurations. */
struct xe_gt_sriov_vf_selfconfig self_config;
- /** @pf_version: negotiated VF/PF ABI version. */
- struct xe_gt_sriov_vf_relay_version pf_version;
/** @runtime: runtime data retrieved from the PF. */
struct xe_gt_sriov_vf_runtime runtime;
};
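
Editor's note: the new ggtt_shift field records how far the GGTT base moved across a VF migration, computed in vf_get_ggtt_info() as start - (s64)config->ggtt_base. A tiny sketch with illustrative addresses:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_base = 0x00100000;	/* base before migration */
	uint64_t new_base = 0x00080000;	/* base reported after restore */

	/* mirrors: config->ggtt_shift = start - (s64)config->ggtt_base */
	int64_t ggtt_shift = (int64_t)(new_base - old_base);

	printf("ggtt_shift = %" PRId64 "\n", ggtt_shift);
	return 0;
}

A signed type is essential here: the restored VF may land either below or above its previous base, and fixup of GGTT references needs the direction.
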
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index e1362e608146..086c12ee3d9d 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -330,6 +330,40 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+static int send_tlb_invalidation_all(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL,
+ 0, /* seqno, replaced in send_tlb_invalidation */
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
+ };
+
+ return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
+}
+
+/**
+ * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
+ * @gt: the &xe_gt structure
+ * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
+ *
+ * Send a request to invalidate all TLBs across PF and all VFs.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ int err;
+
+ xe_gt_assert(gt, gt == fence->gt);
+
+ err = send_tlb_invalidation_all(gt, fence);
+ if (err)
+		xe_gt_err(gt, "TLB invalidation request failed (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
/*
* Ensure that roundup_pow_of_two(length) doesn't overflow.
* Note that roundup_pow_of_two() operates on unsigned long,
@@ -449,30 +483,6 @@ void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
}
/**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
- * @vma: VMA to invalidate
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma)
-{
- xe_gt_assert(gt, vma);
-
- return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
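
Editor's note: the new xe_gt_tlb_invalidation_all() above builds a fixed 3-dword GuC action. A standalone sketch of that message layout; the action and opcode values are illustrative stand-ins, the real ones live in the GuC ABI headers.

#include <stdio.h>

/* illustrative stand-ins for the GuC ABI constants */
#define XE_GUC_ACTION_TLB_INVALIDATION_ALL	0x7002u
#define INVAL_FULL_OP				0x1u	/* MAKE_INVAL_OP(FULL) stand-in */

int main(void)
{
	/* mirrors the 3-dword action built by send_tlb_invalidation_all() */
	unsigned action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION_ALL,
		0,		/* seqno slot, filled in by the send path */
		INVAL_FULL_OP,
	};

	for (unsigned i = 0; i < sizeof(action) / sizeof(action[0]); i++)
		printf("action[%u] = 0x%08x\n", i, action[i]);
	return 0;
}
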
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index abe9b03d543e..f7f0f2eaf4b5 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -19,10 +19,8 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma);
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
+int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 516c81e3b8dd..8c63e3263643 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,23 +12,20 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
-static void
-load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
+static void load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs,
+ const struct xe_reg regs[])
{
- va_list argp;
u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
int i;
- if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
- numregs = XE_MAX_DSS_FUSE_REGS;
+ xe_gt_assert(gt, numregs <= ARRAY_SIZE(fuse_val));
- va_start(argp, numregs);
for (i = 0; i < numregs; i++)
- fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
- va_end(argp);
+ fuse_val[i] = xe_mmio_read32(&gt->mmio, regs[i]);
bitmap_from_arr32(mask, fuse_val, numregs * 32);
}
@@ -218,9 +215,19 @@ get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
void
xe_gt_topology_init(struct xe_gt *gt)
{
+ static const struct xe_reg geometry_regs[] = {
+ XELP_GT_GEOMETRY_DSS_ENABLE,
+ XE2_GT_GEOMETRY_DSS_1,
+ XE2_GT_GEOMETRY_DSS_2,
+ };
+ static const struct xe_reg compute_regs[] = {
+ XEHP_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
+ XE2_GT_COMPUTE_DSS_2,
+ };
+ int num_geometry_regs, num_compute_regs;
struct xe_device *xe = gt_to_xe(gt);
struct drm_printer p;
- int num_geometry_regs, num_compute_regs;
get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
@@ -228,23 +235,18 @@ xe_gt_topology_init(struct xe_gt *gt)
* Register counts returned shouldn't exceed the number of registers
* passed as parameters below.
*/
- drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
- drm_WARN_ON(&xe->drm, num_compute_regs > 3);
+ xe_gt_assert(gt, num_geometry_regs <= ARRAY_SIZE(geometry_regs));
+ xe_gt_assert(gt, num_compute_regs <= ARRAY_SIZE(compute_regs));
load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
- num_geometry_regs,
- XELP_GT_GEOMETRY_DSS_ENABLE,
- XE2_GT_GEOMETRY_DSS_1,
- XE2_GT_GEOMETRY_DSS_2);
- load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
- XEHP_GT_COMPUTE_DSS_ENABLE,
- XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
- XE2_GT_COMPUTE_DSS_2);
+ num_geometry_regs, geometry_regs);
+ load_dss_mask(gt, gt->fuse_topo.c_dss_mask,
+ num_compute_regs, compute_regs);
+
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
- p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
-
+ p = xe_gt_dbg_printer(gt);
xe_gt_topology_dump(gt, &p);
}
@@ -288,11 +290,6 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
}
-bool xe_dss_mask_empty(const xe_dss_mask_t mask)
-{
- return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS);
-}
-
/**
* xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
* @gt: GT to check
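
Editor's note: the topology change above replaces a va_arg register list with a const table and keeps the bitmap_from_arr32() conversion. A self-contained sketch of that read-then-bitmap flow, with read_fuse() as a stand-in for xe_mmio_read32() and fake fuse values:

#include <stdio.h>

#define MAX_REGS 3	/* stands in for XE_MAX_DSS_FUSE_REGS */

/* stand-in for xe_mmio_read32(); fake fuse values for the demo */
static unsigned read_fuse(int i)
{
	static const unsigned fuses[MAX_REGS] = { 0x000000ff, 0x00000100, 0 };
	return fuses[i];
}

int main(void)
{
	unsigned fuse_val[MAX_REGS] = {0};
	int numregs = MAX_REGS;

	/* the table-driven loop that replaced the va_arg version above */
	for (int i = 0; i < numregs; i++)
		fuse_val[i] = read_fuse(i);

	/* equivalent of bitmap_from_arr32(): bit b lives in word b/32 */
	for (int b = 0; b < numregs * 32; b++)
		if (fuse_val[b / 32] >> (b % 32) & 1)
			printf("DSS %d enabled\n", b);
	return 0;
}

The const array also lets the compile-time xe_gt_assert() against ARRAY_SIZE() replace the runtime drm_WARN_ON() clamping in the old variadic version.
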
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index a72d26ba0653..c8140704ad4c 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -41,8 +41,6 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
-bool xe_dss_mask_empty(const xe_dss_mask_t mask);
-
bool
xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 7def0959da35..96344c604726 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -377,6 +377,8 @@ struct xe_gt {
u16 group_target;
/** @steering.instance_target: instance to steer accesses to */
u16 instance_target;
+ /** @steering.initialized: Whether this steering range is initialized */
+ bool initialized;
} steering[NUM_STEERING_TYPES];
/**
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bac5471a1a78..b1d1d6da3758 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -29,6 +29,7 @@
#include "xe_guc_db_mgr.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
@@ -59,7 +60,7 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
xe_assert(xe, addr < GUC_GGTT_TOP);
- xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
+ xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);
return addr;
}
@@ -420,7 +421,7 @@ static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 t
buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
- xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo));
return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
desc, buf, G2G_BUFFER_SIZE);
@@ -570,6 +571,86 @@ err_deregister:
return err;
}
+static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ num_dwords
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static bool supports_dynamic_ics(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ /* Dynamic ICS is available for PVC and Xe2 and newer platforms. */
+ if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20)
+ return false;
+
+ /*
+ * The feature is currently not compatible with multi-lrc, so the GuC
+ * does not support it at all on the media engines (which are the main
+ * users of mlrc). On the primary GT side, to avoid it being used in
+ * conjunction with mlrc, we only enable it if we are in single CCS
+ * mode.
+ */
+ if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1)
+ return false;
+
+ /*
+ * Dynamic ICS requires GuC v70.40.1, which maps to compatibility
+ * version v1.18.4.
+ */
+ return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4);
+}
+
+#define OPT_IN_MAX_DWORDS 16
+int xe_guc_opt_in_features_enable(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS);
+ u32 count = 0;
+ u32 *klvs;
+ int ret;
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ klvs = xe_guc_buf_cpu_ptr(buf);
+
+ /*
+ * The extra CAT error type opt-in was added in GuC v70.17.0, which maps
+ * to compatibility version v1.7.0.
+ * Note that the GuC allows enabling this KLV even on platforms that do
+ * not support the extra type; in such case the returned type variable
+ * will be set to a known invalid value which we can check against.
+ */
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE);
+
+ if (supports_dynamic_ics(guc))
+ klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH);
+
+ if (count) {
+ xe_assert(xe, count <= OPT_IN_MAX_DWORDS);
+
+ ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC opt-in features: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
@@ -577,7 +658,7 @@ static void guc_fini_hw(void *arg)
unsigned int fw_ref;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_fini_hw(&guc_to_gt(guc)->uc);
+ xe_uc_sanitize_reset(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
guc_g2g_fini(guc);
@@ -627,23 +708,51 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
return 0;
}
-static int vf_guc_init(struct xe_guc *guc)
+static int vf_guc_init_noalloc(struct xe_guc *guc)
{
+ struct xe_gt *gt = guc_to_gt(guc);
int err;
- xe_guc_comm_init_early(guc);
-
- err = xe_guc_ct_init(&guc->ct);
+ err = xe_gt_sriov_vf_bootstrap(gt);
if (err)
return err;
- err = xe_guc_relay_init(&guc->relay);
+ err = xe_gt_sriov_vf_query_config(gt);
if (err)
return err;
return 0;
}
+int xe_guc_init_noalloc(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ xe_guc_comm_init_early(guc);
+
+ ret = xe_guc_ct_init_noalloc(&guc->ct);
+ if (ret)
+ goto out;
+
+ ret = xe_guc_relay_init(&guc->relay);
+ if (ret)
+ goto out;
+
+ if (IS_SRIOV_VF(xe)) {
+ ret = vf_guc_init_noalloc(guc);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
+ return ret;
+}
+
int xe_guc_init(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -653,13 +762,13 @@ int xe_guc_init(struct xe_guc *guc)
guc->fw.type = XE_UC_FW_TYPE_GUC;
ret = xe_uc_fw_init(&guc->fw);
if (ret)
- goto out;
+ return ret;
if (!xe_uc_fw_is_enabled(&guc->fw))
return 0;
if (IS_SRIOV_VF(xe)) {
- ret = vf_guc_init(guc);
+ ret = xe_guc_ct_init(&guc->ct);
if (ret)
goto out;
return 0;
@@ -681,10 +790,6 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = xe_guc_relay_init(&guc->relay);
- if (ret)
- goto out;
-
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
@@ -693,8 +798,6 @@ int xe_guc_init(struct xe_guc *guc)
guc_init_params(guc);
- xe_guc_comm_init_early(guc);
-
return 0;
out:
@@ -710,6 +813,10 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
if (err)
return err;
+ err = xe_guc_buf_cache_init(&guc->buf);
+ if (err)
+ return err;
+
/* XXX xe_guc_db_mgr_init not needed for now */
return 0;
@@ -763,6 +870,10 @@ int xe_guc_post_load_init(struct xe_guc *guc)
xe_guc_ads_populate_post_load(&guc->ads);
+ ret = xe_guc_opt_in_features_enable(guc);
+ if (ret)
+ return ret;
+
if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
ret = guc_g2g_start(guc);
if (ret)
@@ -1098,14 +1209,6 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
int ret;
- ret = xe_gt_sriov_vf_bootstrap(gt);
- if (ret)
- return ret;
-
- ret = xe_gt_sriov_vf_query_config(gt);
- if (ret)
- return ret;
-
ret = xe_guc_hwconfig_init(guc);
if (ret)
return ret;
@@ -1116,13 +1219,17 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
ret = xe_gt_sriov_vf_connect(gt);
if (ret)
- return ret;
+ goto err_out;
ret = xe_gt_sriov_vf_query_runtime(gt);
if (ret)
- return ret;
+ goto err_out;
return 0;
+
+err_out:
+ xe_guc_sanitize(guc);
+ return ret;
}
/**
@@ -1285,6 +1392,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
+ bool lost = false;
int ret;
int i;
@@ -1318,6 +1426,12 @@ retry:
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
50000, &reply, false);
if (ret) {
+ /* scratch registers might be cleared during FLR, try once more */
+ if (!reply && !lost) {
+ xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
+ lost = true;
+ goto retry;
+ }
timeout:
xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
request[0], reply);
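
Editor's note: the "lost" handling added to xe_guc_mmio_send_recv() above retries exactly once when the scratch registers read back as zero (as happens across an FLR). A standalone sketch of that retry-once pattern, with wait_reply() faking the two polls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* fake wait: the first poll sees scratch registers cleared by an FLR,
 * the second poll sees a real reply */
static int wait_reply(unsigned *reply, int attempt)
{
	*reply = attempt ? 0x1234 : 0;
	return *reply ? 0 : -ETIMEDOUT;
}

int main(void)
{
	unsigned reply = 0;
	bool lost = false;
	int attempt = 0;
	int ret;

retry:
	ret = wait_reply(&reply, attempt++);
	if (ret) {
		/* scratch registers cleared: retry exactly once */
		if (!reply && !lost) {
			printf("request lost, trying again\n");
			lost = true;
			goto retry;
		}
		printf("no reply, giving up (%d)\n", ret);
		return 1;
	}
	printf("reply = 0x%x\n", reply);
	return 0;
}

The single-shot flag matters: a genuinely dead GuC still reads back zero, so retrying unboundedly would hide a real timeout.
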
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 58338be44558..22cf019a11bf 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -26,6 +26,7 @@
struct drm_printer;
void xe_guc_comm_init_early(struct xe_guc *guc);
+int xe_guc_init_noalloc(struct xe_guc *guc);
int xe_guc_init(struct xe_guc *guc);
int xe_guc_init_post_hwconfig(struct xe_guc *guc);
int xe_guc_post_load_init(struct xe_guc *guc);
@@ -33,6 +34,7 @@ int xe_guc_reset(struct xe_guc *guc);
int xe_guc_upload(struct xe_guc *guc);
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc);
int xe_guc_enable_communication(struct xe_guc *guc);
+int xe_guc_opt_in_features_enable(struct xe_guc *guc);
int xe_guc_suspend(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 44c1fa2fe7c8..131cfc56be00 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -20,6 +20,7 @@
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
@@ -889,7 +890,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
- xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
guc_policies_init(ads);
guc_golden_lrc_init(ads);
guc_mapping_table_init_invalid(gt, &info_map);
@@ -913,7 +914,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
- xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
guc_policies_init(ads);
fill_engine_enable_masks(gt, &info_map);
guc_mmio_reg_state_init(ads);
@@ -1004,16 +1005,16 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
*/
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
- struct xe_device *xe = ads_to_xe(ads);
- struct xe_gt *gt = ads_to_gt(ads);
- struct xe_tile *tile = gt_to_tile(gt);
struct guc_policies *policies;
- struct xe_bo *bo;
- int ret = 0;
+ struct xe_guc *guc = ads_to_guc(ads);
+ struct xe_device *xe = ads_to_xe(ads);
+ CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
- policies = kmalloc(sizeof(*policies), GFP_KERNEL);
- if (!policies)
- return -ENOMEM;
+ policies = xe_guc_buf_cpu_ptr(buf);
+ memset(policies, 0, sizeof(*policies));
policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
@@ -1023,16 +1024,5 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
else
policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
- bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto out;
- }
-
- ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
-out:
- kfree(policies);
- return ret;
+ return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
}
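/*
 * Editorial sketch, not part of the patch: the CLASS(xe_guc_buf, buf)(...)
 * declaration above is assumed to come from a DEFINE_CLASS() in
 * xe_guc_buf.h built on <linux/cleanup.h>. Acquisition happens at the
 * declaration and release runs automatically at end of scope, which is
 * what lets both the early -ENOBUFS return and the final return skip any
 * goto-based cleanup:
 *
 *	CLASS(xe_guc_buf, buf)(&guc->buf, ndwords);	// reserve on declaration
 *	if (!xe_guc_buf_is_valid(buf))
 *		return -ENOBUFS;			// auto-release on return
 *	...						// xe_guc_buf_cpu_ptr(buf), xe_guc_buf_flush(buf)
 *	return 0;					// auto-release here too
 */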
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 0193c94dd6a0..14a07dca48e7 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -37,10 +37,6 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
struct xe_gt *gt = cache_to_gt(cache);
struct xe_sa_manager *sam;
- /* XXX: currently it's useful only for the PF actions */
- if (!IS_SRIOV_PF(gt_to_xe(gt)))
- return 0;
-
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
if (IS_ERR(sam))
return PTR_ERR(sam);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index bbcbb348256f..b6acccfcd351 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -25,6 +25,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
@@ -84,11 +85,14 @@ struct g2h_fence {
u16 error;
u16 hint;
u16 reason;
+ bool cancel;
bool retry;
bool fail;
bool done;
};
+#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
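/*
 * Editorial sketch, not part of the patch: a minimal userspace check of
 * what the casts in make_u64() guard against. Without the (u32) cast a
 * negative "lo" would sign-extend and clobber the high half.
 */
#include <assert.h>
#include <stdint.h>

#define make_u64(hi, lo) ((uint64_t)((uint64_t)(uint32_t)(hi) << 32 | (uint32_t)(lo)))

int main(void)
{
	assert(make_u64(0, -1) == 0xffffffffULL);	/* lo kept to 32 bits */
	assert(make_u64(1, 0) == 0x100000000ULL);	/* hi shifted, not extended */
	return 0;
}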
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
g2h_fence->response_buffer = response_buffer;
@@ -100,6 +104,13 @@ static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
g2h_fence->seqno = ~0x0;
}
+static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
+{
+ g2h_fence->cancel = true;
+ g2h_fence->fail = true;
+ g2h_fence->done = true;
+}
+
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
return g2h_fence->seqno == ~0x0;
@@ -206,12 +217,10 @@ static void primelockdep(struct xe_guc_ct *ct)
fs_reclaim_release(GFP_KERNEL);
}
-int xe_guc_ct_init(struct xe_guc_ct *ct)
+int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_bo *bo;
int err;
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
@@ -237,6 +246,23 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
+ err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
+ if (err)
+ return err;
+
+ xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ ct->state = XE_GUC_CT_STATE_DISABLED;
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
+
+int xe_guc_ct_init(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
@@ -246,13 +272,6 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
return PTR_ERR(bo);
ct->bo = bo;
-
- err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
- if (err)
- return err;
-
- xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
- ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
@@ -373,9 +392,13 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
return ret > 0 ? -EPROTO : ret;
}
-static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+static void guc_ct_change_state(struct xe_guc_ct *ct,
enum xe_guc_ct_state state)
{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct g2h_fence *g2h_fence;
+ unsigned long idx;
+
mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
@@ -387,8 +410,20 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
ct->g2h_outstanding = 0;
ct->state = state;
+ xe_gt_dbg(gt, "GuC CT communication channel %s\n",
+ state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
+ str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));
+
spin_unlock_irq(&ct->fast_lock);
+ /* cancel all in-flight send-recv requests */
+ xa_for_each(&ct->fence_lookup, idx, g2h_fence)
+ g2h_fence_cancel(g2h_fence);
+
+ /* make sure guc_ct_send_recv() will see g2h_fence changes */
+ smp_mb();
+ wake_up_all(&ct->g2h_fence_wq);
+
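/*
 * Editorial sketch, not part of the patch: the smp_mb() + wake_up_all()
 * above pairs with the waiter in guc_ct_send_recv() (simplified here;
 * locals are illustrative). A waiter that observes the wakeup is
 * guaranteed to also observe the cancel/fail/done flags set by
 * g2h_fence_cancel():
 *
 *	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 *	if (g2h_fence.fail)
 *		ret = g2h_fence.cancel ? -ECANCELED : -EIO;
 */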
/*
* Lockdep doesn't like this under the fast lock and the destroy only
* needs to be serialized with the send path which ct lock provides.
@@ -442,7 +477,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
- xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -458,11 +493,10 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (err)
goto err_out;
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
smp_mb();
wake_up_all(&ct->wq);
- xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
@@ -503,7 +537,7 @@ static void stop_g2h_handler(struct xe_guc_ct *ct)
*/
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
ct_exit_safe_mode(ct);
stop_g2h_handler(ct);
}
@@ -519,7 +553,7 @@ void xe_guc_ct_stop(struct xe_guc_ct *ct)
if (!xe_guc_ct_initialized(ct))
return;
- xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -630,6 +664,47 @@ static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
spin_unlock_irq(&ct->fast_lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+ unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ unsigned long entries[SZ_32];
+ unsigned int n;
+
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+ /* May be called under spinlock, so avoid sleeping */
+ ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
+#endif
+ ct->fast_req[slot].fence = fence;
+ ct->fast_req[slot].action = action;
+}
+#else
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+}
+#endif
+
+/*
+ * The CT protocol accepts a 16-bit fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
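/*
 * Editorial sketch, not part of the patch: a userspace check of the
 * encoding above - bit 15 marks untracked (FAST_REQUEST) fences and
 * bits 14:0 are a rolling counter.
 */
#include <assert.h>
#include <stdint.h>

#define CT_SEQNO_MASK      0x7fffu	/* GENMASK(14, 0) */
#define CT_SEQNO_UNTRACKED 0x8000u	/* BIT(15) */

int main(void)
{
	uint32_t fence_seqno = 0x7fff;	/* counter about to wrap */
	uint16_t tracked = fence_seqno++ & CT_SEQNO_MASK;
	uint16_t untracked = (fence_seqno++ & CT_SEQNO_MASK) | CT_SEQNO_UNTRACKED;

	assert(tracked == 0x7fff);	/* last value before the wrap */
	assert(untracked == 0x8000);	/* counter wrapped to 0, bit 15 set */
	return 0;
}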
+
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -706,6 +781,9 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
} else {
+ fast_req_track(ct, ct_fence_value,
+ FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
+
cmd[1] =
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
@@ -738,25 +816,6 @@ corrupted:
return -EPIPE;
}
-/*
- * The CT protocol accepts a 16 bits fence. This field is fully owned by the
- * driver, the GuC will just copy it to the reply message. Since we need to
- * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
- * we use one bit of the seqno as an indicator for that and a rolling counter
- * for the remaining 15 bits.
- */
-#define CT_SEQNO_MASK GENMASK(14, 0)
-#define CT_SEQNO_UNTRACKED BIT(15)
-static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
-{
- u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
-
- if (!is_g2h_fence)
- seqno |= CT_SEQNO_UNTRACKED;
-
- return seqno;
-}
-
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
@@ -1057,6 +1116,11 @@ retry_same_fence:
goto retry;
}
if (g2h_fence.fail) {
+ if (g2h_fence.cancel) {
+ xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
+ ret = -ECANCELED;
+ goto unlock;
+ }
xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
action[0], g2h_fence.error, g2h_fence.hint);
ret = -EIO;
@@ -1065,6 +1129,7 @@ retry_same_fence:
if (ret > 0)
ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+unlock:
mutex_unlock(&ct->lock);
return ret;
@@ -1148,6 +1213,55 @@ static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+ u16 fence_min = U16_MAX, fence_max = 0;
+ struct xe_gt *gt = ct_to_gt(ct);
+ bool found = false;
+ unsigned int n;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ char *buf;
+#endif
+
+ lockdep_assert_held(&ct->lock);
+
+ for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
+ if (ct->fast_req[n].fence < fence_min)
+ fence_min = ct->fast_req[n].fence;
+ if (ct->fast_req[n].fence > fence_max)
+ fence_max = ct->fast_req[n].fence;
+
+ if (ct->fast_req[n].fence != fence)
+ continue;
+ found = true;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ buf = kmalloc(SZ_4K, GFP_NOWAIT);
+ if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
+ fence, ct->fast_req[n].action, buf);
+ else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
+ fence, ct->fast_req[n].action);
+ kfree(buf);
+#else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
+ fence, ct->fast_req[n].action);
+#endif
+ break;
+ }
+
+ if (!found)
+ xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
+ fence, fence_min, fence_max, ct->fence_seqno);
+}
+#else
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+}
+#endif
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1176,6 +1290,9 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+
+ fast_req_report(ct, fence);
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
@@ -1629,6 +1746,186 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
+static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
+ u32 size, u32 idx, s64 shift)
+{
+ u32 hi, lo;
+ u64 offset;
+
+ lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
+ hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
+ offset = make_u64(hi, lo);
+ offset += shift;
+ lo = lower_32_bits(offset);
+ hi = upper_32_bits(offset);
+ xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
+ xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
+}
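/*
 * Editorial sketch, not part of the patch: the same split-dword fixup on
 * a plain wrapping ring (the xe_map_*_ring_u32() helpers are assumed to
 * wrap the dword index modulo the buffer size). The shift must be applied
 * to the recombined 64-bit value so a carry can ripple into the high half.
 */
#include <assert.h>
#include <stdint.h>

static void fixup_u64_in_ring(uint32_t *ring, uint32_t n, uint32_t idx, int64_t shift)
{
	uint64_t v = ((uint64_t)ring[(idx + 1) % n] << 32) | ring[idx % n];

	v += shift;
	ring[idx % n] = (uint32_t)v;
	ring[(idx + 1) % n] = (uint32_t)(v >> 32);
}

int main(void)
{
	uint32_t ring[4] = { 0, 0, 0xffffffff, 0 };	/* lo in slot 2, hi in slot 3 */

	fixup_u64_in_ring(ring, 4, 2, 1);
	assert(ring[2] == 0 && ring[3] == 1);		/* carry rippled into hi */
	return 0;
}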
+
+/*
+ * Shift any GGTT addresses within a single message that was left in the
+ * CTB from before post-migration recovery.
+ * @ct: pointer to CT struct of the target GuC
+ * @cmds: iomap buffer containing CT messages
+ * @head: start of the target message within the buffer
+ * @len: length of the target message
+ * @size: size of the commands buffer
+ * @shift: the address shift to be added to each GGTT reference
+ * Return: true if the message was fixed or needed no fixups, false on failure
+ */
+static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
+ struct iosys_map *cmds, u32 head,
+ u32 len, u32 size, s64 shift)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 action, i, n;
+
+ xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
+
+ msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+
+ xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
+
+ switch (action) {
+ case XE_GUC_ACTION_REGISTER_CONTEXT:
+ if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
+ break;
+ case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
+ if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
+ goto err_len;
+ n = xe_map_rd_ring_u32(xe, cmds, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
+ if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ for (i = 0; i < n; i++)
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
+ + 2 * i, shift);
+ break;
+ default:
+ break;
+ }
+ return true;
+
+err_len:
+ xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len);
+ return false;
+}
+
+/*
+ * Apply fixups to the next outgoing CT message within given CTB
+ * @ct: the &xe_guc_ct struct instance representing the target GuC
+ * @h2g: the &guc_ctb struct instance of the target buffer
+ * @shift: shift to be added to all GGTT addresses within the CTB
+ * @mhead: pointer to an integer storing the message start position; the
+ * position is advanced to the next message before this function returns
+ * @avail: size of the area available for parsing, that is, the length
+ * of all remaining messages stored within the CTB
+ * Return: size of the area available for parsing after one message
+ * has been parsed, that is, the length remaining from the updated mhead
+ */
+static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
+ s64 shift, u32 *mhead, s32 avail)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 size = h2g->info.size;
+ u32 head = *mhead;
+ u32 len;
+
+ xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
+
+ /* Read header */
+ msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
+
+ if (unlikely(len > (u32)avail)) {
+ xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
+ avail, len);
+ return 0;
+ }
+
+ head = (head + GUC_CTB_MSG_MIN_LEN) % size;
+ if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
+ return 0;
+ *mhead = (head + msg_len_to_hxg_len(len)) % size;
+
+ return avail - len;
+}
+
+/**
+ * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
+ * @ct: pointer to CT struct of the target GuC
+ * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
+ *
+ * Messages in GuC to Host CTB are owned by GuC and any fixups in them
+ * are made by GuC. But the content of the Host to GuC CTB is owned by the
+ * KMD, so fixups to GGTT references in any pending messages need to be
+ * applied here.
+ * This function updates GGTT offsets in payloads of pending H2G CTB
+ * messages (messages which were not consumed by GuC before the VF got
+ * paused).
+ */
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
+{
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+ struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 head, tail, size;
+ s32 avail;
+
+ if (unlikely(h2g->info.broken))
+ return;
+
+ h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+ head = h2g->info.head;
+ tail = READ_ONCE(h2g->info.tail);
+ size = h2g->info.size;
+
+ if (unlikely(head > size))
+ goto corrupted;
+
+ if (unlikely(tail >= size))
+ goto corrupted;
+
+ avail = tail - head;
+
+ /* beware of buffer wrap case */
+ if (unlikely(avail < 0))
+ avail += size;
+ xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
+ xe_gt_assert(gt, avail >= 0);
+
+ while (avail > 0)
+ avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
+
+ return;
+
+corrupted:
+ xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
+ head, tail, size);
+ h2g->info.broken = true;
+}
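/*
 * Editorial sketch, not part of the patch: the wrap-aware "avail"
 * computation used above, with head/tail as dword offsets into a ring of
 * "size" dwords.
 */
#include <assert.h>
#include <stdint.h>

static int32_t ctb_avail(uint32_t head, uint32_t tail, uint32_t size)
{
	int32_t avail = (int32_t)tail - (int32_t)head;

	return avail < 0 ? avail + (int32_t)size : avail;
}

int main(void)
{
	assert(ctb_avail(4, 10, 16) == 6);	/* no wrap */
	assert(ctb_avail(10, 4, 16) == 10);	/* producer wrapped: 16 - 10 + 4 */
	return 0;
}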
+
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
bool want_ctb)
{
@@ -1639,7 +1936,7 @@ static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bo
return NULL;
if (ct->bo && want_ctb) {
- snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb_size = xe_bo_size(ct->bo);
snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}
@@ -1775,6 +2072,24 @@ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Helper that lets the driver detect whether a fault injection test is
+ * currently active, so it can reduce unnecessary debug output. It normally
+ * returns zero, but the fault injection framework can override that with an
+ * error code. Since faults are injected through this function, the 'noinline'
+ * attribute is needed to keep the compiler from inlining it, as it otherwise
+ * would for a small static function defined in a header file.
+ */
+noinline int xe_is_injection_active(void) { return 0; }
+ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
+#else
+int xe_is_injection_active(void) { return 0; }
+#endif
+
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
{
struct xe_guc_log_snapshot *snapshot_log;
@@ -1785,6 +2100,12 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
if (ctb)
ctb->info.broken = true;
+ /*
+ * A huge dump is generated when injecting errors into GuC CT/MMIO
+ * functions, so suppress the dump when a fault is injected.
+ */
+ if (xe_is_injection_active())
+ return;
/* Ignore further errors after the first dump until a reset */
if (ct->dead.reported)
@@ -1835,7 +2156,6 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
-
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 582aac106469..18d4225e6502 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -11,6 +11,7 @@
struct drm_printer;
struct xe_device;
+int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
@@ -22,6 +23,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
+
static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
{
return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 8e1b9d981d61..8b03b50313d9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/iosys-map.h>
#include <linux/spinlock_types.h>
+#include <linux/stackdepot.h>
#include <linux/wait.h>
#include <linux/xarray.h>
@@ -104,6 +105,18 @@ struct xe_dead_ct {
/** snapshot_log: copy of GuC log at point of error */
struct xe_guc_log_snapshot *snapshot_log;
};
+
+/** struct xe_fast_req_fence - Used to track FAST_REQ messages by fence to match error responses */
+struct xe_fast_req_fence {
+ /** @fence: sequence number sent in H2G and returned in G2H error */
+ u16 fence;
+ /** @action: H2G action code */
+ u16 action;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ /** @stack: call stack from when the H2G was sent */
+ depot_stack_handle_t stack;
+#endif
+};
#endif
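/*
 * Editorial sketch, not part of the patch: the FAST_REQ history above is
 * a 32-entry ring indexed by fence % SZ_32, so an error response arriving
 * more than 32 sends after its request can find the slot overwritten -
 * the "tracking buffer wrapped?" warning in fast_req_report().
 */
#include <assert.h>
#include <stdint.h>

#define FAST_REQ_SLOTS 32	/* SZ_32 in the kernel code */

int main(void)
{
	uint16_t old_fence = 3;
	uint16_t new_fence = old_fence + FAST_REQ_SLOTS;

	/* both fences map to the same slot; the newer send evicts the older */
	assert(old_fence % FAST_REQ_SLOTS == new_fence % FAST_REQ_SLOTS);
	return 0;
}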
/**
@@ -152,6 +165,8 @@ struct xe_guc_ct {
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
/** @dead: information for debugging dead CTs */
struct xe_dead_ct dead;
+ /** @fast_req: history of FAST_REQ messages for matching with G2H error responses */
+ struct xe_fast_req_fence fast_req[SZ_32];
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 0fb48f8f05d8..92e1f9f41b8c 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -124,7 +124,7 @@ static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
static bool is_engine_activity_supported(struct xe_guc *guc)
{
struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
- struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_uc_fw_version required = { .major = 1, .minor = 14, .patch = 1 };
struct xe_gt *gt = guc_to_gt(guc);
if (IS_SRIOV_VF(gt_to_xe(gt))) {
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index 4c39f01e4f52..a3f421e2adc0 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -20,6 +20,8 @@ struct xe_exec_queue;
struct xe_guc_exec_queue {
/** @q: Backpointer to parent xe_exec_queue */
struct xe_exec_queue *q;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
/** @sched: GPU scheduler for this xe_exec_queue */
struct xe_gpu_scheduler sched;
/** @entity: Scheduler entity for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 38039c411387..c01ccb35dc75 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -79,7 +79,7 @@ static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *
* Also, can't use vmalloc as it might be called from atomic context. So need
* to break the buffer up into smaller chunks that can be allocated.
*/
- snapshot->size = log->bo->size;
+ snapshot->size = xe_bo_size(log->bo);
snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);
snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index 5b896f5fafaf..f1e2b0be90a9 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -12,7 +12,7 @@
struct drm_printer;
struct xe_device;
-#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_1M
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index c0ca61695d76..68b192fe3b32 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -189,7 +189,7 @@ static int pc_action_reset(struct xe_guc_pc *pc)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
ERR_PTR(ret));
@@ -213,7 +213,7 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
/* Blocking here to ensure the results are ready before reading them */
ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
ERR_PTR(ret));
@@ -236,7 +236,7 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
id, value, ERR_PTR(ret));
@@ -258,7 +258,7 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
ERR_PTR(ret));
@@ -275,7 +275,7 @@ static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
mode, ERR_PTR(ret));
return ret;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 2ac87ff4a057..cafb47711e9b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -498,6 +498,15 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
+ /* explicitly check some fields that we might fix up later */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), q->width ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR]);
xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
@@ -522,6 +531,14 @@ static void __register_exec_queue(struct xe_guc *guc,
info->hwlrca_hi,
};
+ /* explicitly check some fields that we might fix up later */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR]);
+
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
@@ -971,10 +988,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
*/
xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
- if (ctx_timestamp < ctx_job_timestamp)
- diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
- else
- diff = ctx_timestamp - ctx_job_timestamp;
+ diff = ctx_timestamp - ctx_job_timestamp;
/*
* Ensure timeout is within 5% to account for a GuC scheduling latency
@@ -1079,12 +1093,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* list so job can be freed and kick scheduler ensuring free job is not
* lost.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- xe_sched_add_pending_job(sched, job);
- xe_sched_submission_start(sched);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
- }
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ return DRM_GPU_SCHED_STAT_NO_HANG;
/* Kill the run_job entry point */
xe_sched_submission_stop(sched);
@@ -1253,7 +1263,7 @@ trigger_reset:
/* Start fence signaling */
xe_hw_fence_irq_start(q->fence_irq);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
sched_enable:
enable_scheduling(q);
@@ -1263,10 +1273,8 @@ rearm:
* but there is not currently an easy way to do in DRM scheduler. With
* some thought, do this in a follow up.
*/
- xe_sched_add_pending_job(sched, job);
xe_sched_submission_start(sched);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_NO_HANG;
}
static void __guc_exec_queue_fini_async(struct work_struct *w)
@@ -1287,7 +1295,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
- kfree(ge);
+ /*
+ * RCU free due to sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
xe_pm_runtime_put(guc_to_xe(guc));
}
@@ -1470,6 +1482,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->guc = ge;
ge->q = q;
+ init_rcu_head(&ge->rcu);
init_waitqueue_head(&ge->suspend_wait);
for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
@@ -2073,12 +2086,16 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
u32 guc_id;
+ u32 type = XE_GUC_CAT_ERR_TYPE_INVALID;
- if (unlikely(len < 1))
+ if (unlikely(!len || len > 2))
return -EPROTO;
guc_id = msg[0];
+ if (len == 2)
+ type = msg[1];
+
if (guc_id == GUC_ID_UNKNOWN) {
/*
* GuC uses GUC_ID_UNKNOWN if it can not map the CAT fault to any PF/VF
@@ -2092,8 +2109,19 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
if (unlikely(!q))
return -EPROTO;
- xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
- xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ /*
+ * The type is HW-defined and changes based on platform, so we don't
+ * decode it in the kernel and only check if it is valid.
+ * See bspec 54047 and 72187 for details.
+ */
+ if (type != XE_GUC_CAT_ERR_TYPE_INVALID)
+ xe_gt_dbg(gt,
+ "Engine memory CAT error [%u]: class=%s, logical_mask: 0x%x, guc_id=%d",
+ type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+ else
+ xe_gt_dbg(gt,
+ "Engine memory CAT error: class=%s, logical_mask: 0x%x, guc_id=%d",
+ xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
trace_xe_exec_queue_memory_cat_error(q);
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 27d11e06a82b..6d7b62724126 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -11,15 +11,12 @@
#include "xe_device_types.h"
#include "xe_drv.h"
#include "xe_heci_gsc.h"
+#include "regs/xe_gsc_regs.h"
#include "xe_platform_types.h"
#include "xe_survivability_mode.h"
#define GSC_BAR_LENGTH 0x00000FFC
-#define DG1_GSC_HECI2_BASE 0x259000
-#define PVC_GSC_HECI2_BASE 0x285000
-#define DG2_GSC_HECI2_BASE 0x374000
-
static void heci_gsc_irq_mask(struct irq_data *d)
{
/* generic irq handling */
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index 6a846e4cb221..7e43b2dd6a32 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -171,7 +171,7 @@ static int huc_auth_via_gsccs(struct xe_huc *huc)
sizeof(struct pxp43_new_huc_auth_in));
wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
xe_bo_ggtt_addr(huc->fw.bo),
- huc->fw.bo->size);
+ xe_bo_size(huc->fw.bo));
do {
err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 93241fd0a4ba..796ba8c34a16 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -17,6 +17,7 @@
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -693,7 +694,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vcs%u fused off\n", j);
+ xe_gt_info(gt, "vcs%u fused off\n", j);
}
}
@@ -703,7 +704,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vecs%u fused off\n", j);
+ xe_gt_info(gt, "vecs%u fused off\n", j);
}
}
}
@@ -728,15 +729,13 @@ static void read_copy_fuses(struct xe_gt *gt)
if (!(BIT(j / 2) & bcs_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "bcs%u fused off\n", j);
+ xe_gt_info(gt, "bcs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
/*
* CCS fusing based on DSS masks only applies to platforms that can
* have more than one CCS.
@@ -755,14 +754,13 @@ static void read_compute_fuses_from_dss(struct xe_gt *gt)
if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
u32 ccs_mask;
ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
@@ -774,7 +772,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
if ((ccs_mask & BIT(j)) == 0) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
@@ -789,8 +787,6 @@ static void read_compute_fuses(struct xe_gt *gt)
static void check_gsc_availability(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
return;
@@ -806,7 +802,25 @@ static void check_gsc_availability(struct xe_gt *gt)
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
+ xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
+ }
+}
+
+static void check_sw_disable(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ if (!(gt->info.engine_mask & BIT(id)))
+ continue;
+
+ if (!(sw_allowed & BIT(id))) {
+ gt->info.engine_mask &= ~BIT(id);
+ xe_gt_info(gt, "%s disabled via configfs\n",
+ engine_infos[id].name);
+ }
}
}
@@ -818,6 +832,7 @@ int xe_hw_engines_init_early(struct xe_gt *gt)
read_copy_fuses(gt);
read_compute_fuses(gt);
check_gsc_availability(gt);
+ check_sw_disable(gt);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
@@ -1044,12 +1059,13 @@ struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
struct drm_xe_engine_class_instance eci)
{
+ struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);
unsigned int idx;
if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
return NULL;
- if (eci.gt_id >= xe->info.gt_count)
+ if (!gt)
return NULL;
idx = array_index_nospec(eci.engine_class,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 2d68c5b5262a..87a6dcb1b4b5 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -13,15 +13,6 @@
#include "xe_vm.h"
static void
-hw_engine_group_free(struct drm_device *drm, void *arg)
-{
- struct xe_hw_engine_group *group = arg;
-
- destroy_workqueue(group->resume_wq);
- kfree(group);
-}
-
-static void
hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
{
struct xe_exec_queue *q;
@@ -53,7 +44,7 @@ hw_engine_group_alloc(struct xe_device *xe)
struct xe_hw_engine_group *group;
int err;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
+ group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -61,14 +52,14 @@ hw_engine_group_alloc(struct xe_device *xe)
if (!group->resume_wq)
return ERR_PTR(-ENOMEM);
+ err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq);
+ if (err)
+ return ERR_PTR(err);
+
init_rwsem(&group->mode_sem);
INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
INIT_LIST_HEAD(&group->exec_queue_list);
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
- if (err)
- return ERR_PTR(err);
-
return group;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index 0b4f12be3692..b2a0c46dfcd4 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -100,6 +100,9 @@ void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
spin_unlock_irqrestore(&irq->lock, flags);
dma_fence_end_signalling(tmp);
}
+
+ /* Safe release of the irq->lock used in dma_fence_init. */
+ synchronize_rcu();
}
void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
@@ -165,7 +168,7 @@ static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
return dma_fence->error ||
- !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+ !__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
}
static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index f008e8049700..f08fc4377d25 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -20,6 +20,8 @@
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"
+#include "xe_vsec.h"
+#include "regs/xe_pmt.h"
enum xe_hwmon_reg {
REG_TEMP,
@@ -51,6 +53,14 @@ enum xe_fan_channel {
FAN_MAX,
};
+/* Attribute index for powerX_xxx_interval sysfs entries */
+enum sensor_attr_power {
+ SENSOR_INDEX_PSYS_PL1,
+ SENSOR_INDEX_PKG_PL1,
+ SENSOR_INDEX_PSYS_PL2,
+ SENSOR_INDEX_PKG_PL2,
+};
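/*
 * Editorial sketch, not part of the patch: how this index is decoded by
 * the show/store/visible callbacks later in this file - even indices
 * select the PSYS/CARD channel, odd indices PKG, and indices above 1
 * select PL2 instead of PL1.
 */
#include <assert.h>

enum { CHANNEL_CARD, CHANNEL_PKG };
enum { PL1, PL2 };

static void decode(int index, int *channel, int *limit)
{
	*channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	*limit = (index > 1) ? PL2 : PL1;
}

int main(void)
{
	int ch, pl;

	decode(0, &ch, &pl);
	assert(ch == CHANNEL_CARD && pl == PL1);	/* SENSOR_INDEX_PSYS_PL1 */
	decode(3, &ch, &pl);
	assert(ch == CHANNEL_PKG && pl == PL2);		/* SENSOR_INDEX_PKG_PL2 */
	return 0;
}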
+
/*
* For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
* not supported and below are SKU units to be used.
@@ -72,8 +82,9 @@ enum xe_fan_channel {
* PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
*/
#define PL1_HWMON_ATTR hwmon_power_max
+#define PL2_HWMON_ATTR hwmon_power_cap
-#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "Invalid")
+#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "PL2")
/*
* Timeout for power limit write mailbox command.
@@ -124,6 +135,9 @@ struct xe_hwmon {
bool boot_power_limit_read;
/** @pl1_on_boot: power limit PL1 on boot */
u32 pl1_on_boot[CHANNEL_MAX];
+ /** @pl2_on_boot: power limit PL2 on boot */
+ u32 pl2_on_boot[CHANNEL_MAX];
+
};
static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
@@ -151,8 +165,10 @@ static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 att
/* return the value only if limit is enabled */
if (attr == PL1_HWMON_ATTR)
*uval = (val0 & PWR_LIM_EN) ? val0 : 0;
+ else if (attr == PL2_HWMON_ATTR)
+ *uval = (val1 & PWR_LIM_EN) ? val1 : 0;
else if (attr == hwmon_power_label)
- *uval = (val0 & PWR_LIM_EN) ? 1 : 0;
+ *uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
else
*uval = 0;
@@ -180,6 +196,8 @@ static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr
if (attr == PL1_HWMON_ATTR)
val0 = (val0 & ~clr) | set;
+ else if (attr == PL2_HWMON_ATTR)
+ val1 = (val1 & ~clr) | set;
else
return -EIO;
@@ -236,12 +254,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_BATTLEMAGE) {
- if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_ENERGY_STATUS;
- else
- return BMG_PLATFORM_ENERGY_STATUS;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
return PVC_GT0_PLATFORM_ENERGY_STATUS;
} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
return PCU_CR_PACKAGE_ENERGY_STATUS;
@@ -273,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val, min, max;
+ u64 reg_val = 0, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -285,16 +298,6 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
-
- /*
- * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
- * So not checking it again here.
- */
- if (!xe_reg_is_valid(pkg_power_sku)) {
- drm_warn(&xe->drm, "pkg_power_sku invalid\n");
- *value = 0;
- goto unlock;
- }
reg_val = xe_mmio_read32(mmio, rapl_limit);
}
@@ -327,7 +330,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
- u32 reg_val;
+ u32 reg_val, max;
struct xe_reg rapl_limit;
mutex_lock(&hwmon->hwmon_lock);
@@ -355,20 +358,25 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
- reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
/*
- * Clamp power limit to card-firmware default as maximum, as an additional protection to
+ * Clamp power limit to GPU firmware default as maximum, as an additional protection to
* pcode clamp.
*/
if (hwmon->xe->info.has_mbx_power_limits) {
- if (reg_val > REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel])) {
- reg_val = REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel]);
- drm_dbg(&hwmon->xe->drm, "Clamping power limit to firmware default 0x%x\n",
+ max = (attr == PL1_HWMON_ATTR) ?
+ hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
+ max = REG_FIELD_PREP(PWR_LIM_VAL, max);
+ if (reg_val > max) {
+ reg_val = max;
+ drm_dbg(&hwmon->xe->drm,
+ "Clamping power limit to GPU firmware default 0x%x\n",
reg_val);
}
}
+ reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
+
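/*
 * Editorial sketch, not part of the patch: a worked example of the
 * conversion above, assuming scl_shift_power = 3 (1/8 W hardware units)
 * and SF_POWER = 1000000 (microwatts) purely for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_CLOSEST_ULL(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	uint64_t uw = 250000000ULL;	/* 250 W requested, in microwatts */
	uint64_t hw = DIV_ROUND_CLOSEST_ULL(uw << 3, 1000000ULL);

	assert(hw == 2000);		/* 250 W x 8 units per watt */
	return 0;
}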
if (hwmon->xe->info.has_mbx_power_limits)
ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
else
@@ -427,16 +435,37 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
- u64 reg_val;
+ u32 reg_val;
+ int ret = 0;
- reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ /* Energy is supported only for card and pkg */
+ if (channel > CHANNEL_PKG) {
+ *energy = 0;
+ return;
+ }
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ u64 pmt_val;
+
+ ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val));
+ if (ret != sizeof(pmt_val)) {
+ drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
+ *energy = 0;
+ return;
+ }
+ if (channel == CHANNEL_PKG)
+ reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
+ else
+ reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
+ } else {
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
+ }
+
+ ei->accum_energy += reg_val - ei->reg_val_prev;
ei->reg_val_prev = reg_val;
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
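/*
 * Editorial sketch, not part of the patch: why the explicit UINT_MAX
 * branch could be dropped above - with reg_val and reg_val_prev both u32,
 * modular subtraction already yields the correct delta across a counter
 * wrap.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t prev = 0xfffffff0u;
	uint32_t cur = 0x10u;

	assert(cur - prev == 0x20u);	/* 0x10 past the wrap point */
	return 0;
}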
@@ -451,8 +480,9 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- int channel = to_sensor_dev_attr(attr)->index;
- u32 power_attr = PL1_HWMON_ATTR;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+
int ret = 0;
xe_pm_runtime_get(hwmon->xe);
@@ -505,9 +535,9 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
int ret;
- int channel = to_sensor_dev_attr(attr)->index;
- u32 power_attr = PL1_HWMON_ATTR;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -534,10 +564,8 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
- if (val > max_win) {
- drm_warn(&hwmon->xe->drm, "power_interval invalid val 0x%lx\n", val);
+ if (val > max_win)
return -EINVAL;
- }
/* val in hw units */
val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
@@ -578,15 +606,25 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_CARD);
-
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
+/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_PKG);
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
+/* PSYS PL2 */
+static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
+/* PKG PL2 */
+static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_max_interval.dev_attr.attr,
&sensor_dev_attr_power2_max_interval.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_interval.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_interval.dev_attr.attr,
NULL
};
@@ -596,19 +634,22 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- int channel = index ? CHANNEL_PKG : CHANNEL_CARD;
- u32 power_attr = PL1_HWMON_ATTR;
- u32 uval;
+ int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+ u32 uval = 0;
+ struct xe_reg rapl_limit;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
xe_pm_runtime_get(hwmon->xe);
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
- ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
- } else {
- ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? attr->mode : 0;
+ } else if (power_attr != PL2_HWMON_ATTR) {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(rapl_limit))
+ uval = xe_mmio_read32(mmio, rapl_limit);
}
+ ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
xe_pm_runtime_put(hwmon->xe);
@@ -628,8 +669,9 @@ static const struct attribute_group *hwmon_groups[] = {
static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT,
- HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
+ HWMON_P_CAP,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
@@ -750,31 +792,62 @@ xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
+ struct xe_reg reg;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
switch (attr) {
case hwmon_power_max:
+ case hwmon_power_cap:
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
- return (uval) ? 0664 : 0;
- } else {
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? 0664 : 0;
+ } else if (attr != PL2_HWMON_ATTR) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ if (uval & PWR_LIM_EN) {
+ drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0664;
}
+ drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0;
case hwmon_power_rated_max:
- if (hwmon->xe->info.has_mbx_power_limits)
+ if (hwmon->xe->info.has_mbx_power_limits) {
return 0;
- else
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
- channel)) ? 0444 : 0;
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ return uval ? 0444 : 0;
+ }
case hwmon_power_crit:
- case hwmon_power_label:
if (channel == CHANNEL_CARD) {
xe_hwmon_pcode_read_i1(hwmon, &uval);
- return (uval & POWER_SETUP_I1_WATTS) ? (attr == hwmon_power_label) ?
- 0444 : 0644 : 0;
+ return (uval & POWER_SETUP_I1_WATTS) ? 0644 : 0;
}
break;
+ case hwmon_power_label:
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+
+ if (!uval) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ }
+ if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
+ }
+ return (uval) ? 0444 : 0;
default:
return 0;
}
@@ -786,6 +859,7 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
+ case hwmon_power_cap:
xe_hwmon_power_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_rated_max:
@@ -802,6 +876,7 @@ static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
+ case hwmon_power_cap:
case hwmon_power_max:
return xe_hwmon_power_max_write(hwmon, attr, channel, val);
case hwmon_power_crit:
@@ -884,11 +959,18 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
+ long energy = 0;
+
switch (attr) {
case hwmon_energy_input:
case hwmon_energy_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ xe_hwmon_energy_get(hwmon, channel, &energy);
+ return energy ? 0444 : 0;
+ } else {
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel)) ? 0444 : 0;
+ }
default:
return 0;
}
@@ -1124,13 +1206,17 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
struct xe_reg pkg_power_sku_unit;
if (hwmon->xe->info.has_mbx_power_limits) {
- /* Check if card firmware support mailbox power limits commands. */
+ /* Check if GPU firmware support mailbox power limits commands. */
if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
&hwmon->pl1_on_boot[CHANNEL_CARD]) |
xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
- &hwmon->pl1_on_boot[CHANNEL_PKG])) {
+ &hwmon->pl1_on_boot[CHANNEL_PKG]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl2_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl2_on_boot[CHANNEL_PKG])) {
drm_warn(&hwmon->xe->drm,
- "Failed to read power limits, check card firmware !\n");
+ "Failed to read power limits, check GPU firmware !\n");
} else {
drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
/* Write default limits to read from pcode from now on. */
@@ -1140,6 +1226,12 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
hwmon->pl1_on_boot[CHANNEL_PKG]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_PKG]);
hwmon->scl_shift_power = PWR_UNIT;
hwmon->scl_shift_energy = ENERGY_UNIT;
hwmon->scl_shift_time = TIME_UNIT;
@@ -1223,4 +1315,4 @@ int xe_hwmon_register(struct xe_device *xe)
return 0;
}
-
+MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
new file mode 100644
index 000000000000..db9c0340be5c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Intel Xe I2C attached Microcontroller Units (MCU)
+ *
+ * Copyright (C) 2025 Intel Corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "regs/xe_i2c_regs.h"
+#include "regs/xe_irq_regs.h"
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_i2c.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+
+/**
+ * DOC: Xe I2C devices
+ *
+ * Register a platform device for the I2C host controller (Synopsys DesignWare
+ * I2C) if the registers of that controller are mapped to the MMIO, and also the
+ * I2C client device for the Add-In Management Controller (the MCU) attached to
+ * the host controller.
+ *
+ * See drivers/i2c/busses/i2c-designware-* for more information on the I2C host
+ * controller.
+ */
+
+static const char adapter_name[] = "i2c_designware";
+
+static const struct property_entry xe_i2c_adapter_properties[] = {
+ PROPERTY_ENTRY_STRING("compatible", "intel,xe-i2c"),
+ PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_FAST_MODE_PLUS_FREQ),
+ { }
+};
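
The software node built from these properties is how the DesignWare core learns its bus timing. A minimal sketch of how a bound driver reads such a property through the generic driver-core API; the probe function is hypothetical, not part of this patch:

/* Sketch: consuming a software-node property (assumes <linux/property.h>
 * and <linux/platform_device.h>). example_probe() is hypothetical.
 */
static int example_probe(struct platform_device *pdev)
{
	u32 freq;
	int ret;

	ret = device_property_read_u32(&pdev->dev, "clock-frequency", &freq);
	if (ret)
		return ret;	/* property absent or malformed */

	dev_info(&pdev->dev, "bus clock: %u Hz\n", freq);
	return 0;
}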
+
+static inline void xe_i2c_read_endpoint(struct xe_mmio *mmio, void *ep)
+{
+ u32 *val = ep;
+
+ val[0] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_PREFIX);
+ val[1] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_POSTFIX);
+}
+
+static void xe_i2c_client_work(struct work_struct *work)
+{
+ struct xe_i2c *i2c = container_of(work, struct xe_i2c, work);
+ struct i2c_board_info info = {
+ .type = "amc",
+ .flags = I2C_CLIENT_HOST_NOTIFY,
+ .addr = i2c->ep.addr[1],
+ };
+
+ i2c->client[0] = i2c_new_client_device(i2c->adapter, &info);
+}
+
+static int xe_i2c_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct xe_i2c *i2c = container_of(nb, struct xe_i2c, bus_notifier);
+ struct i2c_adapter *adapter = i2c_verify_adapter(data);
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ adapter && dev->parent == &i2c->pdev->dev) {
+ i2c->adapter = adapter;
+ schedule_work(&i2c->work);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int xe_i2c_register_adapter(struct xe_i2c *i2c)
+{
+ struct pci_dev *pci = to_pci_dev(i2c->drm_dev);
+ struct platform_device *pdev;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ fwnode = fwnode_create_software_node(xe_i2c_adapter_properties, NULL);
+ if (!fwnode)
+ return -ENOMEM;
+
+ /*
+ * Not using platform_device_register_full() here because it hands back
+ * the platform_device only after the device is already live on the bus.
+ * xe_i2c_notifier() needs that handle, and it may run before
+ * platform_device_register_full() is done.
+ */
+ pdev = platform_device_alloc(adapter_name, pci_dev_id(pci));
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto err_fwnode_remove;
+ }
+
+ if (i2c->adapter_irq) {
+ struct resource res;
+
+ res = DEFINE_RES_IRQ_NAMED(i2c->adapter_irq, "xe_i2c");
+
+ ret = platform_device_add_resources(pdev, &res, 1);
+ if (ret)
+ goto err_pdev_put;
+ }
+
+ pdev->dev.parent = i2c->drm_dev;
+ pdev->dev.fwnode = fwnode;
+ i2c->adapter_node = fwnode;
+ i2c->pdev = pdev;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err_pdev_put;
+
+ return 0;
+
+err_pdev_put:
+ platform_device_put(pdev);
+err_fwnode_remove:
+ fwnode_remove_software_node(fwnode);
+
+ return ret;
+}
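
To make the ordering constraint in the comment above concrete, a sketch of the two registration styles; the helper names are real platform-device API, the surrounding flow is illustrative:

/*
 * Sketch of the two registration styles:
 *
 *	pdev = platform_device_register_full(&info);
 *		// device is already on the bus when this returns, so
 *		// xe_i2c_notifier() may have run before i2c->pdev is set
 *
 *	pdev = platform_device_alloc(name, id);
 *	i2c->pdev = pdev;			// publish the handle first
 *	ret = platform_device_add(pdev);	// notifier can now match it
 */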
+
+static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
+{
+ platform_device_unregister(i2c->pdev);
+ fwnode_remove_software_node(i2c->adapter_node);
+}
+
+/**
+ * xe_i2c_irq_handler() - Handler for I2C interrupts
+ * @xe: xe device instance
+ * @master_ctl: interrupt register
+ *
+ * Forward interrupts generated by the I2C host adapter to the I2C host adapter
+ * driver.
+ */
+void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ if (!xe->i2c || !xe->i2c->adapter_irq)
+ return;
+
+ if (master_ctl & I2C_IRQ)
+ generic_handle_irq_safe(xe->i2c->adapter_irq);
+}
+
+static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops xe_i2c_irq_ops = {
+ .map = xe_i2c_irq_map,
+};
+
+static int xe_i2c_create_irq(struct xe_i2c *i2c)
+{
+ struct irq_domain *domain;
+
+ if (!(i2c->ep.capabilities & XE_I2C_EP_CAP_IRQ))
+ return 0;
+
+ domain = irq_domain_create_linear(dev_fwnode(i2c->drm_dev), 1, &xe_i2c_irq_ops, NULL);
+ if (!domain)
+ return -ENOMEM;
+
+ i2c->adapter_irq = irq_create_mapping(domain, 0);
+ i2c->irqdomain = domain;
+
+ return 0;
+}
+
+static void xe_i2c_remove_irq(struct xe_i2c *i2c)
+{
+ if (!i2c->irqdomain)
+ return;
+
+ irq_dispose_mapping(i2c->adapter_irq);
+ irq_domain_remove(i2c->irqdomain);
+}
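
Putting the pieces together, the interrupt path implied by the handlers above runs as sketched here:

/*
 * Interrupt plumbing, end to end (sketch):
 *
 *   MASTER_CTL bit I2C_IRQ fires
 *     -> xe_i2c_irq_handler()            device-level dispatch, above
 *     -> generic_handle_irq_safe(virq)   virq from irq_create_mapping()
 *     -> i2c-designware ISR              which requested the virq found
 *                                        in its platform IRQ resource
 */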
+
+static int xe_i2c_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct xe_i2c *i2c = context;
+
+ *val = xe_mmio_read32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET));
+
+ return 0;
+}
+
+static int xe_i2c_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct xe_i2c *i2c = context;
+
+ xe_mmio_write32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET), val);
+
+ return 0;
+}
+
+static const struct regmap_config i2c_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = xe_i2c_read,
+ .reg_write = xe_i2c_write,
+ .fast_io = true,
+};
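
With this config every regmap access lands in the MMIO accessors above. A hedged usage sketch, where the register offset is hypothetical:

/* Sketch: a regmap_read()/regmap_write() round trip through the custom
 * accessors. EXAMPLE_REG_OFF is hypothetical; offsets are byte offsets
 * relative to I2C_MEM_SPACE_OFFSET.
 */
#define EXAMPLE_REG_OFF 0x0

static int example_regmap_rmw(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, EXAMPLE_REG_OFF, &val);	/* -> xe_i2c_read() */
	if (ret)
		return ret;

	return regmap_write(map, EXAMPLE_REG_OFF, val | BIT(0));
}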
+
+void xe_i2c_pm_suspend(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
+ drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
+}
+
+void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return;
+
+ if (d3cold)
+ xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY);
+
+ xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);
+ drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
+}
+
+static void xe_i2c_remove(void *data)
+{
+ struct xe_i2c *i2c = data;
+ unsigned int i;
+
+ for (i = 0; i < XE_I2C_MAX_CLIENTS; i++)
+ i2c_unregister_device(i2c->client[i]);
+
+ bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier);
+ xe_i2c_unregister_adapter(i2c);
+ xe_i2c_remove_irq(i2c);
+}
+
+/**
+ * xe_i2c_probe() - Probe the I2C host adapter and the I2C clients attached to it
+ * @xe: xe device instance
+ *
+ * Register all the I2C devices described in the I2C Endpoint data structure.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int xe_i2c_probe(struct xe_device *xe)
+{
+ struct device *drm_dev = xe->drm.dev;
+ struct xe_i2c_endpoint ep;
+ struct regmap *regmap;
+ struct xe_i2c *i2c;
+ int ret;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return 0;
+
+ xe_i2c_read_endpoint(xe_root_tile_mmio(xe), &ep);
+ if (ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ return 0;
+
+ i2c = devm_kzalloc(drm_dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ INIT_WORK(&i2c->work, xe_i2c_client_work);
+ i2c->mmio = xe_root_tile_mmio(xe);
+ i2c->drm_dev = drm_dev;
+ i2c->ep = ep;
+ xe->i2c = i2c;
+
+ /* PCI PM isn't aware of this device, bring it up and match it with SGUnit state. */
+ xe_i2c_pm_resume(xe, true);
+
+ regmap = devm_regmap_init(drm_dev, NULL, i2c, &i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ i2c->bus_notifier.notifier_call = xe_i2c_notifier;
+ ret = bus_register_notifier(&i2c_bus_type, &i2c->bus_notifier);
+ if (ret)
+ return ret;
+
+ ret = xe_i2c_create_irq(i2c);
+ if (ret)
+ goto err_unregister_notifier;
+
+ ret = xe_i2c_register_adapter(i2c);
+ if (ret)
+ goto err_remove_irq;
+
+ return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c);
+
+err_remove_irq:
+ xe_i2c_remove_irq(i2c);
+
+err_unregister_notifier:
+ bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier);
+
+ return ret;
+}
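
On unbind the devm action reverses this sequence; summarized from xe_i2c_remove() above:

/*
 * Teardown order on driver unbind (sketch): devm invokes xe_i2c_remove(),
 * which unregisters the I2C clients, then the bus notifier, then the
 * adapter's platform device, and finally disposes of the IRQ mapping -
 * the reverse of the probe sequence.
 */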
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
new file mode 100644
index 000000000000..b767ed8ce52b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _XE_I2C_H_
+#define _XE_I2C_H_
+
+#include <linux/bits.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_adapter;
+struct i2c_client;
+struct irq_domain;
+struct platform_device;
+struct xe_device;
+struct xe_mmio;
+
+#define XE_I2C_MAX_CLIENTS 3
+
+#define XE_I2C_EP_COOKIE_DEVICE 0xde
+
+/* Endpoint Capabilities */
+#define XE_I2C_EP_CAP_IRQ BIT(0)
+
+struct xe_i2c_endpoint {
+ u8 cookie;
+ u8 capabilities;
+ u16 addr[XE_I2C_MAX_CLIENTS];
+};
+
+struct xe_i2c {
+ struct fwnode_handle *adapter_node;
+ struct platform_device *pdev;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client[XE_I2C_MAX_CLIENTS];
+
+ struct notifier_block bus_notifier;
+ struct work_struct work;
+
+ struct irq_domain *irqdomain;
+ int adapter_irq;
+
+ struct xe_i2c_endpoint ep;
+ struct device *drm_dev;
+
+ struct xe_mmio *mmio;
+};
+
+#if IS_ENABLED(CONFIG_I2C)
+int xe_i2c_probe(struct xe_device *xe);
+void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
+void xe_i2c_pm_suspend(struct xe_device *xe);
+void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
+#else
+static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
+static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
+static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
+static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5362d3174b06..5df5b8c2a3e4 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -18,10 +18,12 @@
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
+#include "xe_tile.h"
/*
* Interrupt registers for a unit are always consecutive and ordered
@@ -160,7 +162,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
dmask = irqs << 16 | irqs;
smask = irqs << 16;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
/* Enable interrupts for each engine class */
xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
if (ccs_mask)
@@ -260,7 +262,7 @@ gt_engine_identity(struct xe_device *xe,
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
- if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
+ if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
@@ -476,6 +478,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
+ xe_i2c_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
}
@@ -550,7 +553,7 @@ static void xelp_irq_reset(struct xe_tile *tile)
static void dg1_irq_reset(struct xe_tile *tile)
{
- if (tile->id == 0)
+ if (xe_tile_is_root(tile))
dg1_intr_disable(tile_to_xe(tile));
gt_irq_reset(tile);
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 023ed6a6b49d..a2000307d5bf 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -11,6 +11,7 @@
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -80,7 +81,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt_assert(lmtt, xe_bo_is_vram(bo));
lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
- xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);
+ xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));
pt->level = level;
pt->bo = bo;
@@ -222,6 +223,58 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
lmtt_setup_dir_ptr(lmtt);
}
+static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
+{
+ struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
+ struct xe_gt_tlb_invalidation_fence *fence = fences;
+ struct xe_tile *tile = lmtt_to_tile(lmtt);
+ struct xe_gt *gt;
+ int result = 0;
+ int err;
+ u8 id;
+
+ for_each_gt_on_tile(gt, tile, id) {
+ xe_gt_tlb_invalidation_fence_init(gt, fence, true);
+ err = xe_gt_tlb_invalidation_all(gt, fence);
+ result = result ?: err;
+ fence++;
+ }
+
+ lmtt_debug(lmtt, "num_fences=%d err=%d\n", (int)(fence - fences), result);
+
+ /*
+ * It is fine to wait on all fences, even those covering an
+ * invalidation request that failed, as such fences should already be
+ * marked as signaled.
+ */
+ fence = fences;
+ for_each_gt_on_tile(gt, tile, id)
+ xe_gt_tlb_invalidation_fence_wait(fence++);
+
+ return result;
+}
+
+/**
+ * xe_lmtt_invalidate_hw - Invalidate LMTT hardware.
+ * @lmtt: the &xe_lmtt to invalidate
+ *
+ * Send requests to all GuCs on this tile to invalidate all TLBs.
+ *
+ * This function should be called only when running as a PF driver.
+ */
+void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
+{
+ struct xe_device *xe = lmtt_to_xe(lmtt);
+ int err;
+
+ lmtt_assert(lmtt, IS_SRIOV_PF(xe));
+
+ err = lmtt_invalidate_hw(lmtt);
+ if (err)
+ xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)\n",
+ lmtt_to_tile(lmtt)->id, ERR_PTR(err));
+}
+
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
u64 pte, unsigned int idx)
{
@@ -276,6 +329,7 @@ static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
return;
lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);
+ lmtt_invalidate_hw(lmtt);
lmtt_assert(lmtt, pd->level > 0);
lmtt_assert(lmtt, pt->level == pd->level - 1);
@@ -397,11 +451,11 @@ static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo
u64 addr, vram_offset;
lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
- lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size));
+ lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size));
lmtt_assert(lmtt, xe_bo_is_vram(bo));
vram_offset = vram_region_gpu_offset(bo->ttm.resource);
- xe_res_first(bo->ttm.resource, 0, bo->size, &cur);
+ xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
while (cur.remaining) {
addr = xe_res_dma(&cur);
addr += vram_offset; /* XXX */
diff --git a/drivers/gpu/drm/xe/xe_lmtt.h b/drivers/gpu/drm/xe/xe_lmtt.h
index cb10ef994db6..75a234fbf367 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.h
+++ b/drivers/gpu/drm/xe/xe_lmtt.h
@@ -15,6 +15,7 @@ struct xe_lmtt_ops;
#ifdef CONFIG_PCI_IOV
int xe_lmtt_init(struct xe_lmtt *lmtt);
void xe_lmtt_init_hw(struct xe_lmtt *lmtt);
+void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt);
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range);
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset);
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid);
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 6e7b70532d11..6d38411bdeba 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -39,15 +39,46 @@
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
#define LRC_PPHWSP_SIZE SZ_4K
+#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
#define LRC_WA_BB_SIZE SZ_4K
+/*
+ * Layout of the LRC and associated data allocated as
+ * lrc->bo:
+ *
+ * Region Size
+ * +============================+=================================+ <- __xe_lrc_ring_offset()
+ * | Ring | ring_size, see |
+ * | | xe_lrc_init() |
+ * +============================+=================================+ <- __xe_lrc_pphwsp_offset()
+ * | PPHWSP (includes SW state) | 4K |
+ * +----------------------------+---------------------------------+ <- __xe_lrc_regs_offset()
+ * | Engine Context Image | n * 4K, see |
+ * | | xe_gt_lrc_size() |
+ * +----------------------------+---------------------------------+ <- __xe_lrc_indirect_ring_offset()
+ * | Indirect Ring State Page | 0 or 4K, see |
+ * | | XE_LRC_FLAG_INDIRECT_RING_STATE |
+ * +============================+=================================+ <- __xe_lrc_indirect_ctx_offset()
+ * | Indirect Context Page | 0 or 4k, see |
+ * | | XE_LRC_FLAG_INDIRECT_CTX |
+ * +============================+=================================+ <- __xe_lrc_wa_bb_offset()
+ * | WA BB Per Ctx | 4K |
+ * +============================+=================================+ <- xe_bo_size(lrc->bo)
+ */
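
The offset helpers later in this file fall out of this layout directly; a worked sketch of the arithmetic, using the sizes from the diagram:

/*
 * Worked offsets (sketch), per the layout above:
 *
 *   bo_size = ring_size + xe_gt_lrc_size() + 4K (WA BB)
 *             [+ 4K if XE_LRC_FLAG_INDIRECT_CTX is set]
 *
 *   __xe_lrc_wa_bb_offset()         = bo_size - 4K
 *   __xe_lrc_indirect_ctx_offset()  = bo_size - 4K - 4K   (flag set only)
 *   __xe_lrc_indirect_ring_offset() = bo_size - 4K - 4K,
 *                                     minus another 4K when the indirect
 *                                     ctx page is present
 */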
+
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
{
return gt_to_xe(lrc->fence_ctx.gt);
}
+static bool
+gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
+{
+ return false;
+}
+
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -582,8 +613,6 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
if (xe_gt_has_indirect_ring_state(hwe->gt))
regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
-
- /* TODO: Timestamp */
}
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
@@ -655,8 +684,8 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_SEQNO_PPHWSP_OFFSET 512
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 1024
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -717,8 +746,23 @@ static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
- /* Indirect ring state page is at the very end of LRC */
- return lrc->size - LRC_INDIRECT_RING_STATE_SIZE;
+ u32 offset = xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE -
+ LRC_INDIRECT_RING_STATE_SIZE;
+
+ if (lrc->flags & XE_LRC_FLAG_INDIRECT_CTX)
+ offset -= LRC_INDIRECT_CTX_BO_SIZE;
+
+ return offset;
+}
+
+static inline u32 __xe_lrc_indirect_ctx_offset(struct xe_lrc *lrc)
+{
+ return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE - LRC_INDIRECT_CTX_BO_SIZE;
+}
+
+static inline u32 __xe_lrc_wa_bb_offset(struct xe_lrc *lrc)
+{
+ return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE;
}
#define DECL_MAP_ADDR_HELPERS(elem) \
@@ -913,15 +957,9 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
xe_bo_unpin_map_no_vm(lrc->bo);
}
-static size_t wa_bb_offset(struct xe_lrc *lrc)
-{
- return lrc->bo->size - LRC_WA_BB_SIZE;
-}
-
/*
- * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active
- * context run ticks.
- * @lrc: Pointer to the lrc.
+ * setup_utilization_wa() - Write commands to the WA BB to assist
+ * in calculating active context run ticks.
*
* Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
* context, but only gets updated when the context switches out. In order to
@@ -946,19 +984,15 @@ static size_t wa_bb_offset(struct xe_lrc *lrc)
* store it in the PPHSWP.
*/
#define CONTEXT_ACTIVE 1ULL
-static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
+static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch,
+ size_t max_len)
{
- const size_t max_size = LRC_WA_BB_SIZE;
- u32 *cmd, *buf = NULL;
+ u32 *cmd = batch;
- if (lrc->bo->vmap.is_iomem) {
- buf = kmalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- cmd = buf;
- } else {
- cmd = lrc->bo->vmap.vaddr + wa_bb_offset(lrc);
- }
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
*cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
*cmd++ = ENGINE_ID(0).addr;
@@ -977,42 +1011,190 @@ static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
*cmd++ = upper_32_bits(CONTEXT_ACTIVE);
}
- *cmd++ = MI_BATCH_BUFFER_END;
+ return cmd - batch;
+}
+
+struct bo_setup {
+ ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_size);
+};
+
+struct bo_setup_state {
+ /* Input: */
+ struct xe_lrc *lrc;
+ struct xe_hw_engine *hwe;
+ size_t max_size;
+ size_t reserve_dw;
+ unsigned int offset;
+ const struct bo_setup *funcs;
+ unsigned int num_funcs;
+
+ /* State: */
+ u32 *buffer;
+ u32 *ptr;
+ unsigned int written;
+};
+
+static int setup_bo(struct bo_setup_state *state)
+{
+ ssize_t remain;
+
+ if (state->lrc->bo->vmap.is_iomem) {
+ state->buffer = kmalloc(state->max_size, GFP_KERNEL);
+ if (!state->buffer)
+ return -ENOMEM;
+ state->ptr = state->buffer;
+ } else {
+ state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
+ state->buffer = NULL;
+ }
+
+ remain = state->max_size / sizeof(u32);
- if (buf) {
- xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bo->vmap,
- wa_bb_offset(lrc), buf,
- (cmd - buf) * sizeof(*cmd));
- kfree(buf);
+ for (size_t i = 0; i < state->num_funcs; i++) {
+ ssize_t len = state->funcs[i].setup(state->lrc, state->hwe,
+ state->ptr, remain);
+
+ remain -= len;
+
+ /*
+ * Caller has asked for at least reserve_dw to remain unused.
+ */
+ if (len < 0 ||
+ xe_gt_WARN_ON(state->lrc->gt, remain < state->reserve_dw))
+ goto fail;
+
+ state->ptr += len;
+ state->written += len;
}
- xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, xe_bo_ggtt_addr(lrc->bo) +
- wa_bb_offset(lrc) + 1);
+ return 0;
+
+fail:
+ kfree(state->buffer);
+ return -ENOSPC;
+}
+
+static void finish_bo(struct bo_setup_state *state)
+{
+ if (!state->buffer)
+ return;
+
+ xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
+ state->offset, state->buffer,
+ state->written * sizeof(u32));
+ kfree(state->buffer);
+}
+
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ static const struct bo_setup funcs[] = {
+ { .setup = setup_utilization_wa },
+ };
+ struct bo_setup_state state = {
+ .lrc = lrc,
+ .hwe = hwe,
+ .max_size = LRC_WA_BB_SIZE,
+ .reserve_dw = 1,
+ .offset = __xe_lrc_wa_bb_offset(lrc),
+ .funcs = funcs,
+ .num_funcs = ARRAY_SIZE(funcs),
+ };
+ int ret;
+
+ ret = setup_bo(&state);
+ if (ret)
+ return ret;
+
+ *state.ptr++ = MI_BATCH_BUFFER_END;
+ state.written++;
+
+ finish_bo(&state);
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
+ xe_bo_ggtt_addr(lrc->bo) + state.offset + 1);
return 0;
}
-#define PVC_CTX_ASID (0x2e + 1)
-#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
+static int
+setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ static struct bo_setup rcs_funcs[] = {
+ };
+ struct bo_setup_state state = {
+ .lrc = lrc,
+ .hwe = hwe,
+ .max_size = (63 * 64) /* max 63 cachelines */,
+ .offset = __xe_lrc_indirect_ctx_offset(lrc),
+ };
+ int ret;
+
+ if (!(lrc->flags & XE_LRC_FLAG_INDIRECT_CTX))
+ return 0;
+
+ if (hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE) {
+ state.funcs = rcs_funcs;
+ state.num_funcs = ARRAY_SIZE(rcs_funcs);
+ }
+
+ if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
+ return 0;
+
+ ret = setup_bo(&state);
+ if (ret)
+ return ret;
+
+ /*
+ * Pad to a 64B cacheline so there is no garbage at the end for the CS
+ * to execute: the indirect ctx size must be a multiple of 64 bytes.
+ */
+ while (state.written & 0xf) {
+ *state.ptr++ = MI_NOOP;
+ state.written++;
+ }
+
+ finish_bo(&state);
+
+ xe_lrc_write_ctx_reg(lrc,
+ CTX_CS_INDIRECT_CTX,
+ (xe_bo_ggtt_addr(lrc->bo) + state.offset) |
+ /* Size in CLs. */
+ (state.written * sizeof(u32) / 64));
+ xe_lrc_write_ctx_reg(lrc,
+ CTX_CS_INDIRECT_CTX_OFFSET,
+ CTX_INDIRECT_CTX_OFFSET_DEFAULT);
+
+ return 0;
+}
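
The cacheline encoding above is easy to sanity-check with concrete numbers; a hedged arithmetic sketch:

/*
 * Size-encoding example (sketch): if the setup functions emitted 250
 * dwords, the MI_NOOP loop pads 'written' up to 256 dwords (the next
 * multiple of 16 dwords == 64 bytes), so:
 *
 *   256 dwords * 4 bytes = 1024 bytes = 16 cachelines
 *
 * and the low bits of CTX_CS_INDIRECT_CTX carry the value 16.
 */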
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_vm *vm, u32 ring_size, u16 msix_vec,
u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
+ const u32 lrc_size = xe_gt_lrc_size(gt, hwe->class);
+ u32 bo_size = ring_size + lrc_size + LRC_WA_BB_SIZE;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
struct iosys_map map;
- void *init_data = NULL;
u32 arb_enable;
- u32 lrc_size;
u32 bo_flags;
int err;
kref_init(&lrc->refcount);
lrc->gt = gt;
+ lrc->size = lrc_size;
lrc->flags = 0;
- lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
+ lrc->ring.size = ring_size;
+ lrc->ring.tail = 0;
+
+ if (gt_engine_needs_indirect_ctx(gt, hwe->class)) {
+ lrc->flags |= XE_LRC_FLAG_INDIRECT_CTX;
+ bo_size += LRC_INDIRECT_CTX_BO_SIZE;
+ }
+
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
@@ -1021,45 +1203,36 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
if (vm && vm->xef) /* userspace */
bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
- /*
- * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
- * via VM bind calls.
- */
- lrc->bo = xe_bo_create_pin_map(xe, tile, NULL,
- lrc_size + LRC_WA_BB_SIZE,
+ lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
ttm_bo_type_kernel,
bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
- lrc->size = lrc_size;
- lrc->ring.size = ring_size;
- lrc->ring.tail = 0;
-
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
- if (!gt->default_lrc[hwe->class]) {
- init_data = empty_lrc_data(hwe);
- if (!init_data) {
- err = -ENOMEM;
- goto err_lrc_finish;
- }
- }
-
/*
* Init the Per-Process HW Status Page (PPHWSP), LRC / context state to known
- * values
+ * values. If there's already a primed default_lrc, just copy it; otherwise
+ * this is the early submission used to record the LRC, so build a new
+ * empty image from scratch.
*/
map = __xe_lrc_pphwsp_map(lrc);
- if (!init_data) {
+ if (gt->default_lrc[hwe->class]) {
xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
- xe_gt_lrc_size(gt, hwe->class) - LRC_PPHWSP_SIZE);
+ lrc_size - LRC_PPHWSP_SIZE);
} else {
- xe_map_memcpy_to(xe, &map, 0, init_data,
- xe_gt_lrc_size(gt, hwe->class));
+ void *init_data = empty_lrc_data(hwe);
+
+ if (!init_data) {
+ err = -ENOMEM;
+ goto err_lrc_finish;
+ }
+
+ xe_map_memcpy_to(xe, &map, 0, init_data, lrc_size);
kfree(init_data);
}
@@ -1113,7 +1286,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+ xe_lrc_write_ctx_reg(lrc, CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT);
@@ -1139,7 +1312,11 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
- err = xe_lrc_setup_utilization(lrc);
+ err = setup_wa_bb(lrc, hwe);
+ if (err)
+ goto err_lrc_finish;
+
+ err = setup_indirect_ctx(lrc, hwe);
if (err)
goto err_lrc_finish;
@@ -1735,7 +1912,7 @@ static const struct instr_state xe_hpg_svg_state[] = {
{ .instr = CMD_3DSTATE_DRAWING_RECTANGLE, .num_dw = 4 },
};
-void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb)
+u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
{
struct xe_gt *gt = q->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
@@ -1770,7 +1947,7 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
if (!state_table) {
xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
- return;
+ return cs;
}
for (int i = 0; i < state_table_size; i++) {
@@ -1793,12 +1970,14 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
instr == CMD_3DSTATE_DRAWING_RECTANGLE)
instr = CMD_3DSTATE_DRAWING_RECTANGLE_FAST;
- bb->cs[bb->len] = instr;
+ *cs = instr;
if (!is_single_dw)
- bb->cs[bb->len] |= (num_dw - 2);
+ *cs |= (num_dw - 2);
- bb->len += num_dw;
+ cs += num_dw;
}
+
+ return cs;
}
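
Since the function now returns the advanced pointer instead of mutating bb->len, callers compose it like any other emit helper; a sketch of the expected call pattern:

/* Sketch: the caller threads its command-stream cursor through the
 * helper and keeps the returned, advanced pointer.
 */
cs = xe_lrc_emit_hwe_state_instructions(q, cs);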
struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
@@ -1819,8 +1998,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
- snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset -
- LRC_WA_BB_SIZE;
+ snapshot->lrc_size = lrc->size;
snapshot->lrc_snapshot = NULL;
snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index eb6e8de8c939..b6c8053c581b 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -112,7 +112,7 @@ void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,
enum xe_engine_class);
-void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb);
+u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs);
struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc);
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot);
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 883e550a9423..e9883706e004 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -22,14 +22,15 @@ struct xe_lrc {
*/
struct xe_bo *bo;
- /** @size: size of lrc including any indirect ring state page */
+ /** @size: size of the lrc and optional indirect ring state */
u32 size;
/** @gt: gt which this LRC belongs to */
struct xe_gt *gt;
/** @flags: LRC flags */
-#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
+#define XE_LRC_FLAG_INDIRECT_CTX 0x1
+#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x2
u32 flags;
/** @refcount: ref count of this lrc */
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index f62e0c8b67ab..8d67f6ba2d95 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -78,6 +78,24 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
iosys_map_wr(map__, offset__, type__, val__); \
})
+#define xe_map_rd_array(xe__, map__, index__, type__) \
+ xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
+
+#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
+ xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
+
+#define xe_map_rd_array_u32(xe__, map__, index__) \
+ xe_map_rd_array(xe__, map__, index__, u32)
+
+#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
+ xe_map_wr_array(xe__, map__, index__, u32, val__)
+
+#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
+ xe_map_rd_array_u32(xe__, map__, (index__) % (size__))
+
+#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
+ xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)
+
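
A hedged usage sketch for the ring variants, with caller-provided map geometry; only the macros themselves are from this patch:

/* Sketch: emit one dword at a monotonically increasing index into a
 * ring of ring_dw dwords; the modulo inside the macro does the wrap.
 */
static void example_ring_emit(struct xe_device *xe, struct iosys_map *map,
			      u32 tail, u32 ring_dw, u32 dw)
{
	xe_map_wr_ring_u32(xe, map, tail, ring_dw, dw);
}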
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
struct xe_device *__xe = xe__; \
xe_device_assert_mem_access(__xe); \
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 07a5161c7d5b..ba1cff2e4cda 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -203,7 +203,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
/* Need to be sure everything fits in the first PT, or create more */
- xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
+ xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
@@ -214,7 +214,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
return PTR_ERR(bo);
/* PT30 & PT31 reserved for 2M identity map */
- pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
@@ -236,7 +236,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- for (i = 0; i < batch->size;
+ for (i = 0; i < xe_bo_size(batch);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
@@ -247,13 +247,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
level++;
}
if (xe->info.has_usm) {
- xe_tile_assert(tile, batch->size == SZ_1M);
+ xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
batch = tile->primary_gt->usm.bb_pool->bo;
m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
- xe_tile_assert(tile, batch->size == SZ_512K);
+ xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
- for (i = 0; i < batch->size;
+ for (i = 0; i < xe_bo_size(batch);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
@@ -306,7 +306,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
+ u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
@@ -321,7 +321,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
- u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+ u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
@@ -768,7 +768,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
struct dma_fence *fence = NULL;
- u64 size = src_bo->size;
+ u64 size = xe_bo_size(src_bo);
struct xe_res_cursor src_it, dst_it, ccs_it;
u64 src_L0_ofs, dst_L0_ofs;
u32 src_L0_pt, dst_L0_pt;
@@ -791,7 +791,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
return ERR_PTR(-EINVAL);
- if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
+ if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
return ERR_PTR(-EINVAL);
if (!src_is_vram)
@@ -1064,7 +1064,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_device *xe = gt_to_xe(gt);
bool clear_only_system_ccs = false;
struct dma_fence *fence = NULL;
- u64 size = bo->size;
+ u64 size = xe_bo_size(bo);
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
@@ -1076,9 +1076,9 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_only_system_ccs = true;
if (!clear_vram)
- xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
+ xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
else
- xe_res_first(src, 0, bo->size, &src_it);
+ xe_res_first(src, 0, xe_bo_size(bo), &src_it);
while (size) {
u64 clear_L0_ofs;
@@ -1407,7 +1407,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
if (idx == chunk)
goto next_cmd;
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
/* Map a PT at most once */
if (pt_bo->update_index < 0)
@@ -1868,7 +1868,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
if (IS_ERR(dma_addr))
return PTR_ERR(dma_addr);
- xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
+ xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
do {
struct dma_fence *__fence;
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 7357458bc0d2..e4db8d58ea2d 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -22,6 +22,9 @@
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"
+#include "xe_wa.h"
+
+#include "generated/xe_device_wa_oob.h"
static void tiles_fini(void *arg)
{
@@ -55,6 +58,7 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
+ struct xe_gt *gt;
u8 id;
/*
@@ -67,7 +71,7 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
/* Possibly override number of tiles based on configuration register */
if (!xe->info.skip_mtcfg) {
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- u8 tile_count;
+ u8 tile_count, gt_count;
u32 mtcfg;
/*
@@ -84,12 +88,15 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
xe->info.tile_count = tile_count;
/*
- * FIXME: Needs some work for standalone media, but
- * should be impossible with multi-tile for now:
- * multi-tile platform with standalone media doesn't
- * exist
+ * We've already set up gt_count according to the full
+ * tile count. Recalculate it to include only the GTs
+ * that belong to the remaining tile(s).
*/
- xe->info.gt_count = xe->info.tile_count;
+ gt_count = 0;
+ for_each_gt(gt, xe, id)
+ if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
+ gt_count++;
+ xe->info.gt_count = gt_count;
}
}
@@ -163,7 +170,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio)
#define DUMMY_REG_OFFSET 0x130030
int i;
- if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
+ if (!XE_DEVICE_WA(mmio->tile->xe, 15015404425))
return;
/* 4 dummy writes */
@@ -176,7 +183,6 @@ u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
val = readb(mmio->regs + addr);
@@ -190,7 +196,6 @@ u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
val = readw(mmio->regs + addr);
@@ -217,7 +222,6 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u32 val;
- /* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index da6793c2f991..107ffe87808c 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -18,29 +18,45 @@
#include "xe_observation.h"
#include "xe_sched_job.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define DEFAULT_GUC_LOG_LEVEL 3
+#else
+#define DEFAULT_GUC_LOG_LEVEL 1
+#endif
+
+#define DEFAULT_PROBE_DISPLAY true
+#define DEFAULT_VRAM_BAR_SIZE 0
+#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE
+#define DEFAULT_WEDGED_MODE 1
+#define DEFAULT_SVM_NOTIFIER_SIZE 512
+
struct xe_modparam xe_modparam = {
- .probe_display = true,
- .guc_log_level = IS_ENABLED(CONFIG_DRM_XE_DEBUG) ? 3 : 1,
- .force_probe = CONFIG_DRM_XE_FORCE_PROBE,
- .wedged_mode = 1,
- .svm_notifier_size = 512,
+ .probe_display = DEFAULT_PROBE_DISPLAY,
+ .guc_log_level = DEFAULT_GUC_LOG_LEVEL,
+ .force_probe = DEFAULT_FORCE_PROBE,
+ .wedged_mode = DEFAULT_WEDGED_MODE,
+ .svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE,
/* the rest are 0 by default */
};
module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
-MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2");
+MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size in MiB, must be power of 2 "
+ "[default=" __stringify(DEFAULT_SVM_NOTIFIER_SIZE) "]");
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
-MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
+MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched "
+ "[default=" __stringify(DEFAULT_PROBE_DISPLAY) "])");
module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size in MiB (<0=disable-resize, 0=max-needed-size, >0=force-size "
+ "[default=" __stringify(DEFAULT_VRAM_BAR_SIZE) "])");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
-MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
+MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1=normal, 2..5=verbose-levels "
+ "[default=" __stringify(DEFAULT_GUC_LOG_LEVEL) "])");
module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
@@ -56,7 +72,8 @@ MODULE_PARM_DESC(gsc_firmware_path,
module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
MODULE_PARM_DESC(force_probe,
- "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");
+ "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details "
+ "[default=" DEFAULT_FORCE_PROBE "])");
#ifdef CONFIG_PCI_IOV
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
@@ -67,7 +84,8 @@ MODULE_PARM_DESC(max_vfs,
module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
MODULE_PARM_DESC(wedged_mode,
- "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");
+ "Module's default policy for the wedged mode (0=never, 1=upon-critical-errors, 2=upon-any-hang "
+ "[default=" __stringify(DEFAULT_WEDGED_MODE) "])");
static int xe_check_nomodeset(void)
{
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
new file mode 100644
index 000000000000..61b0a1531a53
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/intel_dg_nvm_aux.h>
+#include <linux/pci.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_mmio.h"
+#include "xe_nvm.h"
+#include "regs/xe_gsc_regs.h"
+#include "xe_sriov.h"
+
+#define GEN12_GUNIT_NVM_BASE 0x00102040
+#define GEN12_DEBUG_NVM_BASE 0x00101018
+
+#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C
+
+#define GEN12_GUNIT_NVM_SIZE 0x80
+#define GEN12_DEBUG_NVM_SIZE 0x4
+
+#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13)
+
+#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)
+
+static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
+ [0] = { .name = "DESCRIPTOR", },
+ [2] = { .name = "GSC", },
+ [9] = { .name = "PADDING", },
+ [11] = { .name = "OptionROM", },
+ [12] = { .name = "DAM", },
+};
+
+static void xe_nvm_release_dev(struct device *dev)
+{
+}
+
+static bool xe_nvm_non_posted_erase(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return false;
+ return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
+ NVM_NON_POSTED_ERASE_CHICKEN_BIT);
+}
+
+static bool xe_nvm_writable_override(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ bool writable_override;
+ resource_size_t base;
+
+ switch (xe->info.platform) {
+ case XE_BATTLEMAGE:
+ base = DG2_GSC_HECI2_BASE;
+ break;
+ case XE_PVC:
+ base = PVC_GSC_HECI2_BASE;
+ break;
+ case XE_DG2:
+ base = DG2_GSC_HECI2_BASE;
+ break;
+ case XE_DG1:
+ base = DG1_GSC_HECI2_BASE;
+ break;
+ default:
+ drm_err(&xe->drm, "Unknown platform\n");
+ return true;
+ }
+
+ writable_override =
+ !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
+ HECI_FW_STATUS_2_NVM_ACCESS_MODE);
+ if (writable_override)
+ drm_info(&xe->drm, "NVM access overridden by jumper\n");
+ return writable_override;
+}
+
+int xe_nvm_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct auxiliary_device *aux_dev;
+ struct intel_dg_nvm_dev *nvm;
+ int ret;
+
+ if (!xe->info.has_gsc_nvm)
+ return 0;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ /* NVM pointer should be NULL here */
+ if (WARN_ON(xe->nvm))
+ return -EFAULT;
+
+ xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!xe->nvm)
+ return -ENOMEM;
+
+ nvm = xe->nvm;
+
+ nvm->writable_override = xe_nvm_writable_override(xe);
+ nvm->non_posted_erase = xe_nvm_non_posted_erase(xe);
+ nvm->bar.parent = &pdev->resource[0];
+ nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
+ nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
+ nvm->bar.flags = IORESOURCE_MEM;
+ nvm->bar.desc = IORES_DESC_NONE;
+ nvm->regions = regions;
+
+ nvm->bar2.parent = &pdev->resource[0];
+ nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start;
+ nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1;
+ nvm->bar2.flags = IORESOURCE_MEM;
+ nvm->bar2.desc = IORES_DESC_NONE;
+
+ aux_dev = &nvm->aux_dev;
+
+ aux_dev->name = "nvm";
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = xe_nvm_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret) {
+ drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
+ goto err;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret) {
+ drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
+ auxiliary_device_uninit(aux_dev);
+ goto err;
+ }
+ return 0;
+
+err:
+ kfree(nvm);
+ xe->nvm = NULL;
+ return ret;
+}
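
The aux_dev->id encoding packs the PCI location so that multiple cards yield unique auxiliary device instances; a worked example of the arithmetic:

/*
 * id encoding example (sketch): for a GPU at 0000:03:00.0,
 *   pci_domain_nr()  = 0x0000
 *   pci_dev_id()     = (bus << 8) | devfn = (0x03 << 8) | 0x00 = 0x0300
 *   aux_dev->id      = (0x0000 << 16) | 0x0300 = 0x0300
 */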
+
+void xe_nvm_fini(struct xe_device *xe)
+{
+ struct intel_dg_nvm_dev *nvm = xe->nvm;
+
+ if (!xe->info.has_gsc_nvm)
+ return;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /* NVM pointer should not be NULL here */
+ if (WARN_ON(!nvm))
+ return;
+
+ auxiliary_device_delete(&nvm->aux_dev);
+ auxiliary_device_uninit(&nvm->aux_dev);
+ kfree(nvm);
+ xe->nvm = NULL;
+}
diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h
new file mode 100644
index 000000000000..7f3d5f57bed0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_nvm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_NVM_H__
+#define __XE_NVM_H__
+
+struct xe_device;
+
+int xe_nvm_init(struct xe_device *xe);
+
+void xe_nvm_fini(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index fb842fa0552e..d991fbd90f20 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -43,6 +43,12 @@
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+enum xe_oam_unit_type {
+ XE_OAM_UNIT_SAG,
+ XE_OAM_UNIT_SCMI_0,
+ XE_OAM_UNIT_SCMI_1,
+};
+
enum xe_oa_submit_deps {
XE_OA_SUBMIT_NO_DEPS,
XE_OA_SUBMIT_ADD_DEPS,
@@ -77,7 +83,7 @@ struct xe_oa_config {
struct xe_oa_open_param {
struct xe_file *xef;
- u32 oa_unit_id;
+ struct xe_oa_unit *oa_unit;
bool sample;
u32 metric_set;
enum xe_oa_format_name oa_format;
@@ -194,7 +200,7 @@ static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *l
static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
{
- return &stream->hwe->oa_unit->regs;
+ return &stream->oa_unit->regs;
}
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
@@ -397,7 +403,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ int size_exponent = __ffs(xe_bo_size(stream->oa_buffer.bo));
u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
@@ -429,7 +435,7 @@ static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* Zero out the OA buffer since we rely on zero report id and timestamp fields */
- memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size);
+ memset(stream->oa_buffer.vaddr, 0, xe_bo_size(stream->oa_buffer.bo));
}
static u32 __format_to_oactrl(const struct xe_oa_format *format, int counter_sel_mask)
@@ -454,7 +460,7 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
{
- return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ return stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
}
@@ -475,7 +481,7 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
__oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE;
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
@@ -838,11 +844,16 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Reset PMON Enable to save power. */
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
+
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, OAM_LAT_MEASURE_ENABLE, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
{
- struct xe_oa_unit *u = stream->hwe->oa_unit;
+ struct xe_oa_unit *u = stream->oa_unit;
struct xe_gt *gt = stream->hwe->gt;
if (WARN_ON(stream != u->exclusive_stream))
@@ -1054,7 +1065,7 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
{
return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
- stream->oa_buffer.bo->size > SZ_16M ?
+ xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
}
@@ -1105,9 +1116,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
-
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, 0, OAM_LAT_MEASURE_ENABLE);
+
/* Configure OAR/OAC */
if (stream->exec_q) {
ret = xe_oa_configure_oa_context(stream, true);
@@ -1139,14 +1154,31 @@ static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *n
return -EINVAL;
}
+static struct xe_oa_unit *xe_oa_lookup_oa_unit(struct xe_oa *oa, u32 oa_unit_id)
+{
+ struct xe_gt *gt;
+ int gt_id, i;
+
+ for_each_gt(gt, oa->xe, gt_id) {
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ struct xe_oa_unit *u = &gt->oa.oa_unit[i];
+
+ if (u->oa_unit_id == oa_unit_id)
+ return u;
+ }
+ }
+
+ return NULL;
+}
+
static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
- if (value >= oa->oa_unit_ids) {
+ param->oa_unit = xe_oa_lookup_oa_unit(oa, value);
+ if (!param->oa_unit) {
drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
return -EINVAL;
}
- param->oa_unit_id = value;
return 0;
}
@@ -1550,7 +1582,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = xe_bo_size(stream->oa_buffer.bo), };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1636,7 +1668,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
+ if (vma->vm_end - vma->vm_start != xe_bo_size(stream->oa_buffer.bo)) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1677,13 +1709,13 @@ static const struct file_operations xe_oa_fops = {
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
- struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS;
+ stream->oa_unit = param->oa_unit;
stream->hwe = param->hwe;
stream->gt = stream->hwe->gt;
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
@@ -1704,7 +1736,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* buffer whose size, circ_size, is a multiple of the report size
*/
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
param->oa_buffer_size -
param->oa_buffer_size % stream->oa_buffer.format->size;
@@ -1762,7 +1794,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- WRITE_ONCE(u->exclusive_stream, stream);
+ WRITE_ONCE(stream->oa_unit->exclusive_stream, stream);
hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1798,7 +1830,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
int ret;
/* We currently only allow exclusive access */
- if (param->hwe->oa_unit->exclusive_stream) {
+ if (param->oa_unit->exclusive_stream) {
drm_dbg(&oa->xe->drm, "OA unit already in use\n");
ret = -EBUSY;
goto exit;
@@ -1874,13 +1906,14 @@ static u64 oa_exponent_to_ns(struct xe_gt *gt, int exponent)
return div_u64(nom + den - 1, den);
}
-static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
+static bool oa_unit_supports_oa_format(struct xe_oa_open_param *param, int type)
{
- switch (hwe->oa_unit->type) {
+ switch (param->oa_unit->type) {
case DRM_XE_OA_UNIT_TYPE_OAG:
return type == DRM_XE_OA_FMT_TYPE_OAG || type == DRM_XE_OA_FMT_TYPE_OAR ||
type == DRM_XE_OA_FMT_TYPE_OAC || type == DRM_XE_OA_FMT_TYPE_PEC;
case DRM_XE_OA_UNIT_TYPE_OAM:
+ case DRM_XE_OA_UNIT_TYPE_OAM_SAG:
return type == DRM_XE_OA_FMT_TYPE_OAM || type == DRM_XE_OA_FMT_TYPE_OAM_MPEC;
default:
return false;
@@ -1899,37 +1932,48 @@ u16 xe_oa_unit_id(struct xe_hw_engine *hwe)
hwe->oa_unit->oa_unit_id : U16_MAX;
}
+/* A hwe must be assigned to stream/oa_unit for batch submissions */
static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
{
- struct xe_gt *gt;
- int i, ret = 0;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int ret = 0;
+
+ /* If not provided, OA unit defaults to OA unit 0 as per uapi */
+ if (!param->oa_unit)
+ param->oa_unit = &xe_device_get_gt(oa->xe, 0)->oa.oa_unit[0];
+ /* When we have an exec_q, get hwe from the exec_q */
if (param->exec_q) {
- /* When we have an exec_q, get hwe from the exec_q */
param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class,
param->engine_instance, true);
- } else {
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
-
- /* Else just get the first hwe attached to the oa unit */
- for_each_gt(gt, oa->xe, i) {
- for_each_hw_engine(hwe, gt, id) {
- if (xe_oa_unit_id(hwe) == param->oa_unit_id) {
- param->hwe = hwe;
- goto out;
- }
- }
- }
+ if (!param->hwe || param->hwe->oa_unit != param->oa_unit)
+ goto err;
+ goto out;
}
-out:
- if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) {
- drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
- param->exec_q ? param->exec_q->class : -1,
- param->engine_instance, param->oa_unit_id);
- ret = -EINVAL;
+
+ /* Else just get the first hwe attached to the oa unit */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (hwe->oa_unit == param->oa_unit) {
+ param->hwe = hwe;
+ goto out;
+ }
}
+ /* If we still didn't find a hwe, just get one with a valid oa_unit from the same gt */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (!hwe->oa_unit)
+ continue;
+
+ param->hwe = hwe;
+ goto out;
+ }
+err:
+ drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
+ param->exec_q ? param->exec_q->class : -1,
+ param->engine_instance, param->oa_unit->oa_unit_id);
+ ret = -EINVAL;
+out:
return ret;
}
@@ -2007,7 +2051,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
f = &oa->oa_formats[param.oa_format];
if (!param.oa_format || !f->size ||
- !engine_supports_oa_format(param.hwe, f->type)) {
+ !oa_unit_supports_oa_format(&param, f->type)) {
drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n",
param.oa_format, f->type, f->size, param.hwe->class);
ret = -EINVAL;
@@ -2155,6 +2199,7 @@ static const struct xe_mmio_range gen12_oa_mux_regs[] = {
static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
+ { .start = 0xB01C, .end = 0xB01C }, /* LNCF_MISC_CONFIG_REGISTER0 */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
{ .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
@@ -2448,20 +2493,38 @@ int xe_oa_register(struct xe_device *xe)
static u32 num_oa_units_per_gt(struct xe_gt *gt)
{
- return 1;
+ if (xe_gt_is_main_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ return 1;
+ else if (!IS_DGFX(gt_to_xe(gt)))
+ return XE_OAM_UNIT_SCMI_0 + 1; /* SAG + SCMI_0 */
+ else
+ return XE_OAM_UNIT_SCMI_1 + 1; /* SAG + SCMI_0 + SCMI_1 */
}
static u32 __hwe_oam_unit(struct xe_hw_engine *hwe)
{
- if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) {
- /*
- * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
- * within the gt use the same OAM. All MTL/LNL SKUs list 1 SA MEDIA
- */
- xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA);
+ if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270)
+ return XE_OA_UNIT_INVALID;
+
+ xe_gt_WARN_ON(hwe->gt, xe_gt_is_main_type(hwe->gt));
+ if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20)
return 0;
- }
+ /*
+ * XE_OAM_UNIT_SAG has only the GSCCS attached to it, and only on some platforms.
+ * The GSCCS also cannot be used to submit batches to program the OAM unit, so we
+ * don't assign an OA unit to the GSCCS. This means XE_OAM_UNIT_SAG is exposed as
+ * an OA unit without attached engines. Fused-off engines can also result in
+ * oa_units with num_engines == 0. OA streams can be opened on all OA units.
+ */
+ else if (hwe->engine_id == XE_HW_ENGINE_GSCCS0)
+ return XE_OA_UNIT_INVALID;
+ else if (!IS_DGFX(gt_to_xe(hwe->gt)))
+ return XE_OAM_UNIT_SCMI_0;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE)
+ return (hwe->instance / 2 & 0x1) + 1;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ return (hwe->instance & 0x1) + 1;
return XE_OA_UNIT_INVALID;
}
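The SCMI mapping for the video engines is easy to misread; a worked example of the two expressions, assuming the usual engine instance numbering (illustration only):

/*
 * Illustration only -- SCMI unit assignment on a Xe2+ discrete media gt:
 *
 *   VCS0, VCS1 -> (0/2 & 1) + 1 == 1, (1/2 & 1) + 1 == 1 -> XE_OAM_UNIT_SCMI_0
 *   VCS2, VCS3 -> (2/2 & 1) + 1 == 2, (3/2 & 1) + 1 == 2 -> XE_OAM_UNIT_SCMI_1
 *   VECS0      -> (0 & 1) + 1 == 1                       -> XE_OAM_UNIT_SCMI_0
 *   VECS1      -> (1 & 1) + 1 == 2                       -> XE_OAM_UNIT_SCMI_1
 *   GSCCS0     -> XE_OA_UNIT_INVALID (cannot submit OAM programming batches)
 */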
@@ -2475,6 +2538,7 @@ static u32 __hwe_oa_unit(struct xe_hw_engine *hwe)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ case XE_ENGINE_CLASS_OTHER:
return __hwe_oam_unit(hwe);
default:
@@ -2514,20 +2578,29 @@ static struct xe_oa_regs __oag_regs(void)
static void __xe_oa_init_oa_units(struct xe_gt *gt)
{
- const u32 mtl_oa_base[] = { 0x13000 };
+ /* Actual address is MEDIA_GT_GSI_OFFSET + oam_base_addr[i] */
+ const u32 oam_base_addr[] = {
+ [XE_OAM_UNIT_SAG] = 0x13000,
+ [XE_OAM_UNIT_SCMI_0] = 0x14000,
+ [XE_OAM_UNIT_SCMI_1] = 0x14800,
+ };
int i, num_units = gt->oa.num_oa_units;
for (i = 0; i < num_units; i++) {
struct xe_oa_unit *u = &gt->oa.oa_unit[i];
- if (gt->info.type != XE_GT_TYPE_MEDIA) {
+ if (xe_gt_is_main_type(gt)) {
u->regs = __oag_regs();
u->type = DRM_XE_OA_UNIT_TYPE_OAG;
- } else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
- u->regs = __oam_regs(mtl_oa_base[i]);
- u->type = DRM_XE_OA_UNIT_TYPE_OAM;
+ } else {
+ xe_gt_assert(gt, GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270);
+ u->regs = __oam_regs(oam_base_addr[i]);
+ u->type = i == XE_OAM_UNIT_SAG && GRAPHICS_VER(gt_to_xe(gt)) >= 20 ?
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG : DRM_XE_OA_UNIT_TYPE_OAM;
}
+ u->gt = gt;
+
xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
/* Ensure MMIO trigger remains disabled till there is a stream */
@@ -2560,10 +2633,6 @@ static int xe_oa_init_gt(struct xe_gt *gt)
}
}
- /*
- * Fused off engines can result in oa_unit's with num_engines == 0. These units
- * will appear in OA unit query, but no OA streams can be opened on them.
- */
gt->oa.num_oa_units = num_oa_units;
gt->oa.oa_unit = u;
@@ -2574,17 +2643,54 @@ static int xe_oa_init_gt(struct xe_gt *gt)
return 0;
}
+static void xe_oa_print_gt_oa_units(struct xe_gt *gt)
+{
+ enum xe_hw_engine_id hwe_id;
+ struct xe_hw_engine *hwe;
+ struct xe_oa_unit *u;
+ char buf[256];
+ int i, n;
+
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ u = &gt->oa.oa_unit[i];
+ buf[0] = '\0';
+ n = 0;
+
+ for_each_hw_engine(hwe, gt, hwe_id)
+ if (xe_oa_unit_id(hwe) == u->oa_unit_id)
+ n += scnprintf(buf + n, sizeof(buf) - n, "%s ", hwe->name);
+
+ xe_gt_dbg(gt, "oa_unit %d, type %d, engines: %s\n", u->oa_unit_id, u->type, buf);
+ }
+}
+
+static void xe_oa_print_oa_units(struct xe_oa *oa)
+{
+ struct xe_gt *gt;
+ int gt_id;
+
+ for_each_gt(gt, oa->xe, gt_id)
+ xe_oa_print_gt_oa_units(gt);
+}
+
static int xe_oa_init_oa_units(struct xe_oa *oa)
{
struct xe_gt *gt;
int i, ret;
+ /* The OAM implementation relies on this ordering of the OAM unit ids */
+ BUILD_BUG_ON(XE_OAM_UNIT_SAG != 0);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_0 != 1);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_1 != 2);
+
for_each_gt(gt, oa->xe, i) {
ret = xe_oa_init_gt(gt);
if (ret)
return ret;
}
+ xe_oa_print_oa_units(oa);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 52e33c37d5ee..2628f78c4e8d 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -95,6 +95,9 @@ struct xe_oa_unit {
/** @oa_unit_id: identifier for the OA unit */
u16 oa_unit_id;
+ /** @gt: gt associated with the OA unit */
+ struct xe_gt *gt;
+
/** @type: Type of OA unit - OAM, OAG etc. */
enum drm_xe_oa_unit_type type;
@@ -182,6 +185,9 @@ struct xe_oa_stream {
/** @gt: gt associated with the oa stream */
struct xe_gt *gt;
+ /** @oa_unit: oa unit for this stream */
+ struct xe_oa_unit *oa_unit;
+
/** @hwe: hardware engine associated with this oa stream */
struct xe_hw_engine *hwe;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 30fdbdb9341e..2e7cb99ae87a 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -103,7 +103,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
*
* Note: There is an implicit assumption in the driver that compression and
* coh_1way+ are mutually exclusive. If this is ever not true then userptr
- * and imported dma-buf from external device will have uncleared ccs state.
+ * and imported dma-buf from external device will have uncleared ccs state. See
+ * also xe_bo_needs_ccs_pages().
*/
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
{ \
@@ -162,21 +163,35 @@ u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg reg = XE_REG(_PAT_INDEX(i));
xe_mmio_write32(&gt->mmio, reg, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value);
}
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
@@ -303,26 +318,6 @@ static const struct xe_pat_ops xelpg_pat_ops = {
.dump = xelpg_dump,
};
-static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat_mcr(gt, table, n_entries);
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
-}
-
-static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat(gt, table, n_entries);
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
-}
-
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -375,8 +370,8 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
}
static const struct xe_pat_ops xe2_pat_ops = {
- .program_graphics = xe2lpg_program_pat,
- .program_media = xe2lpm_program_pat,
+ .program_graphics = program_pat_mcr,
+ .program_media = program_pat,
.dump = xe2_dump,
};
@@ -385,6 +380,9 @@ void xe_pat_init_early(struct xe_device *xe)
if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
+ xe->pat.pat_ats = &xe2_pat_ats;
+ if (IS_DGFX(xe))
+ xe->pat.pat_pta = &xe2_pat_pta;
/* Wa_16023588340. XXX: Should use XE_WA */
if (GRAPHICS_VERx100(xe) == 2001)
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 278af53c74dc..3c40ef426f0c 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -38,43 +38,6 @@ enum toggle_d3cold {
D3COLD_ENABLE,
};
-struct xe_subplatform_desc {
- enum xe_subplatform subplatform;
- const char *name;
- const u16 *pciidlist;
-};
-
-struct xe_device_desc {
- /* Should only ever be set for platforms without GMD_ID */
- const struct xe_ip *pre_gmdid_graphics_ip;
- /* Should only ever be set for platforms without GMD_ID */
- const struct xe_ip *pre_gmdid_media_ip;
-
- const char *platform_name;
- const struct xe_subplatform_desc *subplatforms;
-
- enum xe_platform platform;
-
- u8 dma_mask_size;
- u8 max_remote_tiles:2;
-
- u8 require_force_probe:1;
- u8 is_dgfx:1;
-
- u8 has_display:1;
- u8 has_fan_control:1;
- u8 has_heci_gscfi:1;
- u8 has_heci_cscfi:1;
- u8 has_llc:1;
- u8 has_mbx_power_limits:1;
- u8 has_pxp:1;
- u8 has_sriov:1;
- u8 needs_scratch:1;
- u8 skip_guc_pc:1;
- u8 skip_mtcfg:1;
- u8 skip_pcode:1;
-};
-
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
@@ -179,9 +142,11 @@ static const struct xe_ip graphics_ips[] = {
{ 1271, "Xe_LPG", &graphics_xelpg },
{ 1274, "Xe_LPG+", &graphics_xelpg },
{ 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2002, "Xe2_HPG", &graphics_xe2 },
{ 2004, "Xe2_LPG", &graphics_xe2 },
{ 3000, "Xe3_LPG", &graphics_xe2 },
{ 3001, "Xe3_LPG", &graphics_xe2 },
+ { 3003, "Xe3_LPG", &graphics_xe2 },
};
/* Pre-GMDID Media IPs */
@@ -194,6 +159,7 @@ static const struct xe_ip media_ips[] = {
{ 1301, "Xe2_HPM", &media_xelpmp },
{ 2000, "Xe2_LPM", &media_xelpmp },
{ 3000, "Xe3_LPM", &media_xelpmp },
+ { 3002, "Xe3_LPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
@@ -203,6 +169,7 @@ static const struct xe_device_desc tgl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -213,6 +180,7 @@ static const struct xe_device_desc rkl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -225,6 +193,7 @@ static const struct xe_device_desc adl_s_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
@@ -241,6 +210,7 @@ static const struct xe_device_desc adl_p_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
@@ -255,6 +225,7 @@ static const struct xe_device_desc adl_n_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -268,7 +239,9 @@ static const struct xe_device_desc dg1_desc = {
PLATFORM(DG1),
.dma_mask_size = 39,
.has_display = true,
+ .has_gsc_nvm = 1,
.has_heci_gscfi = 1,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -279,6 +252,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
PLATFORM(DG2), \
+ .has_gsc_nvm = 1, \
.has_heci_gscfi = 1, \
.subplatforms = (const struct xe_subplatform_desc[]) { \
{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
@@ -291,6 +265,7 @@ static const struct xe_device_desc ats_m_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
.pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
DG2_FEATURES,
@@ -301,6 +276,7 @@ static const struct xe_device_desc dg2_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
.pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
+ .max_gt_per_tile = 1,
.require_force_probe = true,
DG2_FEATURES,
@@ -315,7 +291,9 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
PLATFORM(PVC),
.dma_mask_size = 52,
.has_display = false,
+ .has_gsc_nvm = 1,
.has_heci_gscfi = 1,
+ .max_gt_per_tile = 1,
.max_remote_tiles = 1,
.require_force_probe = true,
.has_mbx_power_limits = false,
@@ -328,6 +306,7 @@ static const struct xe_device_desc mtl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .max_gt_per_tile = 2,
};
static const struct xe_device_desc lnl_desc = {
@@ -335,6 +314,7 @@ static const struct xe_device_desc lnl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .max_gt_per_tile = 2,
.needs_scratch = true,
};
@@ -345,7 +325,10 @@ static const struct xe_device_desc bmg_desc = {
.has_display = true,
.has_fan_control = true,
.has_mbx_power_limits = true,
+ .has_gsc_nvm = 1,
.has_heci_cscfi = 1,
+ .has_sriov = true,
+ .max_gt_per_tile = 2,
.needs_scratch = true,
};
@@ -354,7 +337,7 @@ static const struct xe_device_desc ptl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_sriov = true,
- .require_force_probe = true,
+ .max_gt_per_tile = 2,
.needs_scratch = true,
};
@@ -588,6 +571,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_fan_control = desc->has_fan_control;
xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
+ xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
@@ -601,6 +585,10 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
desc->has_display;
+
+ xe_assert(xe, desc->max_gt_per_tile > 0);
+ xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
+ xe->info.max_gt_per_tile = desc->max_gt_per_tile;
xe->info.tile_count = 1 + desc->max_remote_tiles;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
@@ -700,10 +688,11 @@ static int xe_info_init(struct xe_device *xe,
*/
for_each_tile(tile, xe, id) {
gt = tile->primary_gt;
- gt->info.id = xe->info.gt_count++;
gt->info.type = XE_GT_TYPE_MAIN;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
+ xe->info.gt_count++;
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -721,17 +710,10 @@ static int xe_info_init(struct xe_device *xe,
gt = tile->media_gt;
gt->info.type = XE_GT_TYPE_MEDIA;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
-
- /*
- * FIXME: At the moment multi-tile and standalone media are
- * mutually exclusive on current platforms. We'll need to
- * come up with a better way to number GTs if we ever wind
- * up with platforms that support both together.
- */
- drm_WARN_ON(&xe->drm, id != 0);
- gt->info.id = xe->info.gt_count++;
+ xe->info.gt_count++;
}
return 0;
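A worked example of the new fixed-stride GT numbering may help (illustration only):

/*
 * Illustration only -- GT ids with max_gt_per_tile == 2 on a two-tile
 * device with standalone media:
 *
 *   tile 0: primary gt id = 0*2 + 0 = 0, media gt id = 0*2 + 1 = 1
 *   tile 1: primary gt id = 1*2 + 0 = 2, media gt id = 1*2 + 1 = 3
 *
 * A tile without a media gt simply leaves its odd id unused, so gt ids
 * may now be sparse and consumers must not assume a dense
 * 0..gt_count-1 id space (see the query_gt_list() change below).
 */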
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index ca6b10d35573..4de6f69ed975 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -8,6 +8,47 @@
#include <linux/types.h>
+#include "xe_platform_types.h"
+
+struct xe_subplatform_desc {
+ enum xe_subplatform subplatform;
+ const char *name;
+ const u16 *pciidlist;
+};
+
+struct xe_device_desc {
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_ip *pre_gmdid_graphics_ip;
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_ip *pre_gmdid_media_ip;
+
+ const char *platform_name;
+ const struct xe_subplatform_desc *subplatforms;
+
+ enum xe_platform platform;
+
+ u8 dma_mask_size;
+ u8 max_remote_tiles:2;
+ u8 max_gt_per_tile:2;
+
+ u8 require_force_probe:1;
+ u8 is_dgfx:1;
+
+ u8 has_display:1;
+ u8 has_fan_control:1;
+ u8 has_gsc_nvm:1;
+ u8 has_heci_gscfi:1;
+ u8 has_heci_cscfi:1;
+ u8 has_llc:1;
+ u8 has_mbx_power_limits:1;
+ u8 has_pxp:1;
+ u8 has_sriov:1;
+ u8 needs_scratch:1;
+ u8 skip_guc_pc:1;
+ u8 skip_mtcfg:1;
+ u8 skip_pcode:1;
+};
+
struct xe_graphics_desc {
u8 va_bits;
u8 vm_max_level;
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9189117fe825..6a7ddb9005f9 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -336,3 +336,33 @@ int xe_pcode_probe_early(struct xe_device *xe)
return xe_pcode_ready(xe, false);
}
ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
+
+/* Helpers with drm device. These should only be called by the display side */
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(tile, mbox, val, val1);
+}
+
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
+}
+
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index de38f44f3201..a5584c1c75f9 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -7,8 +7,10 @@
#define _XE_PCODE_H_
#include <linux/types.h>
-struct xe_tile;
+
+struct drm_device;
struct xe_device;
+struct xe_tile;
void xe_pcode_init(struct xe_tile *tile);
int xe_pcode_probe_early(struct xe_device *xe);
@@ -32,4 +34,12 @@ int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
| FIELD_PREP(PCODE_MB_PARAM1, param1)\
| FIELD_PREP(PCODE_MB_PARAM2, param2))
+/* Helpers taking a drm_device */
+int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+#define intel_pcode_write(drm, mbox, val) \
+ intel_pcode_write_timeout((drm), (mbox), (val), 1)
+int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 0befdea77db1..92bfcba51e19 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -50,6 +50,21 @@
#define READ_PL_FROM_FW 0x1
#define READ_PL_FROM_PCODE 0x0
+#define PCODE_LATE_BINDING 0x5C
+#define GET_CAPABILITY_STATUS 0x0
+#define V1_FAN_SUPPORTED REG_BIT(0)
+#define VR_PARAMS_SUPPORTED REG_BIT(3)
+#define V1_FAN_PROVISIONED REG_BIT(16)
+#define VR_PARAMS_PROVISIONED REG_BIT(19)
+#define GET_VERSION_LOW 0x1
+#define GET_VERSION_HIGH 0x2
+#define MAJOR_VERSION_MASK REG_GENMASK(31, 16)
+#define MINOR_VERSION_MASK REG_GENMASK(15, 0)
+#define HOTFIX_VERSION_MASK REG_GENMASK(31, 16)
+#define BUILD_VERSION_MASK REG_GENMASK(15, 0)
+#define FAN_TABLE 1
+#define VR_CONFIG 2
+
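A hedged sketch of how the two GET_VERSION_* replies might be decoded into a four-part version, using the intel_pcode_read() helper added above. The helper name and the exact mailbox encoding (sub-command in PARAM1, zero PARAM2) are assumptions here; check the real late-binding callers before relying on them:

/* Sketch only -- assumed mailbox encoding, hypothetical helper name */
static int example_late_binding_version(struct drm_device *drm, u16 *major,
					u16 *minor, u16 *hotfix, u16 *build)
{
	u32 lo, hi;
	int err;

	err = intel_pcode_read(drm, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
			       &lo, NULL);
	if (err)
		return err;

	err = intel_pcode_read(drm, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
			       &hi, NULL);
	if (err)
		return err;

	*major = REG_FIELD_GET(MAJOR_VERSION_MASK, lo);
	*minor = REG_FIELD_GET(MINOR_VERSION_MASK, lo);
	*hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, hi);
	*build = REG_FIELD_GET(BUILD_VERSION_MASK, hi);
	return 0;
}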
#define PCODE_FREQUENCY_CONFIG 0x6e
/* Frequency Config Sub Commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index ad263de44111..e279b47ba03b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -19,6 +19,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
+#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
@@ -146,6 +147,8 @@ int xe_pm_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
+ xe_i2c_pm_suspend(xe);
+
drm_dbg(&xe->drm, "Device suspended\n");
return 0;
@@ -190,6 +193,8 @@ int xe_pm_resume(struct xe_device *xe)
if (err)
goto err;
+ xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
@@ -487,6 +492,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_runtime_suspend_late(xe);
+ xe_i2c_pm_suspend(xe);
+
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return 0;
@@ -534,6 +541,8 @@ int xe_pm_runtime_resume(struct xe_device *xe)
goto out;
}
+ xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+
xe_irq_resume(xe);
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 69df0e3520a5..cab51d826345 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -157,10 +157,13 @@ static bool event_gt_forcewake(struct perf_event *event)
return true;
}
-static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
+static bool event_supported(struct xe_pmu *pmu, unsigned int gt_id,
unsigned int id)
{
- if (gt >= XE_MAX_GT_PER_TILE)
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ struct xe_gt *gt = xe_device_get_gt(xe, gt_id);
+
+ if (!gt)
return false;
return id < sizeof(pmu->supported_events) * BITS_PER_BYTE &&
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index b04756a97cdc..c8e63bd23300 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -907,6 +907,11 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);
+ if (xe_vma_bo(vma))
+ xe_bo_assert_held(xe_vma_bo(vma));
+ else if (xe_vma_is_userptr(vma))
+ lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -1458,6 +1463,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
struct xe_vm *vm = pt_update->vops->vm;
struct xe_vma_ops *vops = pt_update->vops;
struct xe_vma_op *op;
+ unsigned long i;
int err;
err = xe_pt_pre_commit(pt_update);
@@ -1467,20 +1473,35 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
xe_svm_notifier_lock(vm);
list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = op->map_range.range;
+ struct xe_svm_range *range = NULL;
if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
continue;
- xe_svm_range_debug(range, "PRE-COMMIT");
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ xe_assert(vm->xe,
+ xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ xe_svm_range_debug(range, "PRE-COMMIT");
- xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -ENODATA;
+ }
+ }
+ } else {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ range = op->map_range.range;
- if (!xe_svm_range_pages_valid(range)) {
- xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
- return -EAGAIN;
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
}
}
@@ -1974,6 +1995,32 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static bool
+xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op,
+ struct xe_svm_range *range)
+{
+ struct xe_vm_pgtable_update *update = pt_op->entries;
+
+ XE_WARN_ON(!pt_op->num_entries);
+
+ /*
+ * We can't skip the invalidation if we are removing PTEs that span more
+ * than the range, so do some checks to ensure we are only removing PTEs
+ * that are invalid.
+ */
+
+ if (pt_op->num_entries > 1)
+ return false;
+
+ if (update->pt->level == 0)
+ return true;
+
+ if (update->pt->level == 1)
+ return xe_svm_range_size(range) >= SZ_2M;
+
+ return false;
+}
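The level checks encode a page-size argument that is worth spelling out (illustration only):

/*
 * Illustration only -- why the level checks are safe. A single level-0
 * update removes 4K PTEs, which cannot extend past the SVM range, so
 * the TLB invalidation may be skipped. A level-1 update removes 2M
 * entries, which is only guaranteed to stay within the range when the
 * range itself is at least SZ_2M. Multiple update entries, or higher
 * levels, fall back to a full invalidation.
 */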
+
static int unbind_range_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2002,7 +2049,10 @@ static int unbind_range_prepare(struct xe_vm *vm,
range->base.itree.last + 1);
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
- pt_update_ops->needs_invalidation = true;
+ pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
+ xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) ||
+ !xe_pt_op_check_range_skip_invalidation(pt_op, range);
xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
pt_op->num_entries);
@@ -2065,11 +2115,20 @@ static int op_prepare(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (xe_vma_is_cpu_addr_mirror(vma))
- break;
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range;
+ unsigned long i;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
- pt_update_ops->wait_vm_kernel = true;
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ vma, range);
+ if (err)
+ return err;
+ }
+ } else {
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
+ pt_update_ops->wait_vm_kernel = true;
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
@@ -2166,10 +2225,15 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
- vma->tile_present |= BIT(tile->id);
- vma->tile_staged &= ~BIT(tile->id);
+ /* All WRITE_ONCEs pair with READ_ONCEs in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
if (invalidate_on_bind)
- vma->tile_invalidated |= BIT(tile->id);
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(tile->id));
+ else
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated & ~BIT(tile->id));
+ vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -2216,6 +2280,18 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
}
+static void range_present_and_invalidated_tile(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_id)
+{
+ /* All WRITE_ONCEs pair with READ_ONCEs in xe_vm_has_valid_gpu_mapping() */
+
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+
+ WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
+ WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
+}
+
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2263,27 +2339,28 @@ static void op_commit(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (!xe_vma_is_cpu_addr_mirror(vma))
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range = NULL;
+ unsigned long i;
+
+ xa_for_each(&op->prefetch_range.range, i, range)
+ range_present_and_invalidated_tile(vm, range, tile->id);
+ } else {
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
fence2, false);
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
-
- if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
- WRITE_ONCE(op->map_range.range->tile_present,
- op->map_range.range->tile_present |
- BIT(tile->id));
- WRITE_ONCE(op->map_range.range->tile_invalidated,
- op->map_range.range->tile_invalidated &
- ~BIT(tile->id));
- } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
+ range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
+ else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
WRITE_ONCE(op->unmap_range.range->tile_present,
op->unmap_range.range->tile_present &
~BIT(tile->id));
- }
+
break;
}
default:
@@ -2476,7 +2553,7 @@ free_ifence:
kfree(mfence);
kfree(ifence);
kill_vm_tile1:
- if (err != -EAGAIN && tile->id)
+ if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index b5bc15f436fa..3d62008c99f1 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -504,69 +504,62 @@ int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 t
return 0;
}
-static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
- spin_lock_irq(&pxp->queues.lock);
- list_add_tail(&q->pxp.link, &pxp->queues.list);
- spin_unlock_irq(&pxp->queues.lock);
+ int ret = 0;
+
+ /*
+ * A queue can be added to the list only if PXP is in the active state;
+ * otherwise the termination might not handle it correctly.
+ */
+ mutex_lock(&pxp->mutex);
+
+ if (pxp->status == XE_PXP_ACTIVE) {
+ spin_lock_irq(&pxp->queues.lock);
+ list_add_tail(&q->pxp.link, &pxp->queues.list);
+ spin_unlock_irq(&pxp->queues.lock);
+ } else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
+ ret = -EIO;
+ } else {
+ ret = -EBUSY; /* try again later */
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+ return ret;
}
-/**
- * xe_pxp_exec_queue_add - add a queue to the PXP list
- * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
- * @q: the queue to add to the list
- *
- * If PXP is enabled and the prerequisites are done, start the PXP ARB
- * session (if not already running) and add the queue to the PXP list. Note
- * that the queue must have previously been marked as using PXP with
- * xe_pxp_exec_queue_set_type.
- *
- * Returns 0 if the PXP ARB session is running and the queue is in the list,
- * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
- * other errno value if something goes wrong during the session start.
- */
-int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int pxp_start(struct xe_pxp *pxp, u8 type)
{
int ret = 0;
+ bool restart = false;
if (!xe_pxp_is_enabled(pxp))
return -ENODEV;
/* we only support HWDRM sessions right now */
- xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
-
- /*
- * Runtime suspend kills PXP, so we take a reference to prevent it from
- * happening while we have active queues that use PXP
- */
- xe_pm_runtime_get(pxp->xe);
+ xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
/* get_readiness_status() returns 0 for in-progress and 1 for done */
ret = xe_pxp_get_readiness_status(pxp);
- if (ret <= 0) {
- if (!ret)
- ret = -EBUSY;
- goto out;
- }
+ if (ret <= 0)
+ return ret ?: -EBUSY;
+
ret = 0;
wait_for_idle:
/*
* if there is an action in progress, wait for it. We need to wait
* outside the lock because the completion is done from within the lock.
- * Note that the two action should never be pending at the same time.
+ * Note that the two actions should never be pending at the same time.
*/
if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
if (!wait_for_completion_timeout(&pxp->activation,
- msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
mutex_lock(&pxp->mutex);
@@ -574,11 +567,9 @@ wait_for_idle:
switch (pxp->status) {
case XE_PXP_ERROR:
ret = -EIO;
- break;
+ goto out_unlock;
case XE_PXP_ACTIVE:
- __exec_queue_add(pxp, q);
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
case XE_PXP_READY_TO_START:
pxp->status = XE_PXP_START_IN_PROGRESS;
reinit_completion(&pxp->activation);
@@ -586,8 +577,8 @@ wait_for_idle:
case XE_PXP_START_IN_PROGRESS:
/* If a start is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->activation));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_NEEDS_TERMINATION:
mark_termination_in_progress(pxp);
break;
@@ -595,29 +586,25 @@ wait_for_idle:
case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
/* If a termination is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->termination));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_SUSPENDED:
default:
drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
ret = -EIO;
- break;
+ goto out_unlock;
}
mutex_unlock(&pxp->mutex);
- if (ret)
- goto out;
-
if (!completion_done(&pxp->termination)) {
ret = pxp_terminate_hw(pxp);
if (ret) {
drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
mutex_lock(&pxp->mutex);
pxp->status = XE_PXP_ERROR;
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
}
goto wait_for_idle;
@@ -639,21 +626,59 @@ wait_for_idle:
if (pxp->status != XE_PXP_START_IN_PROGRESS) {
drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
pxp->status = XE_PXP_NEEDS_TERMINATION;
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
}
/* If everything went ok, update the status and add the queue to the list */
- if (!ret) {
+ if (!ret)
pxp->status = XE_PXP_ACTIVE;
- __exec_queue_add(pxp, q);
- } else {
+ else
pxp->status = XE_PXP_ERROR;
- }
+out_unlock:
mutex_unlock(&pxp->mutex);
-out:
+ if (restart)
+ goto wait_for_idle;
+
+ return ret;
+}
+
+/**
+ * xe_pxp_exec_queue_add - add a queue to the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to add to the list
+ *
+ * If PXP is enabled and the prerequisites are done, start the PXP default
+ * session (if not already running) and add the queue to the PXP list.
+ *
+ * Returns 0 if the PXP session is running and the queue is in the list,
+ * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
+ * other errno value if something goes wrong during the session start.
+ */
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ int ret;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /*
+ * Runtime suspend kills PXP, so we take a reference to prevent it from
+ * happening while we have active queues that use PXP
+ */
+ xe_pm_runtime_get(pxp->xe);
+
+start:
+ ret = pxp_start(pxp, q->pxp.type);
+
+ if (!ret) {
+ ret = __exec_queue_add(pxp, q);
+ if (ret == -EBUSY)
+ goto start;
+ }
+
/*
* in the successful case the PM ref is released from
* xe_pxp_exec_queue_remove
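One subtlety of the new pxp_start()/__exec_queue_add() split worth noting (illustration only):

/*
 * Illustration only -- the race the -EBUSY retry closes: pxp_start()
 * returns with PXP active, but a concurrent termination can change the
 * status before __exec_queue_add() takes pxp->mutex. The add then sees
 * a non-ACTIVE state and returns -EBUSY, and the caller restarts the
 * whole pxp_start() sequence rather than adding a queue that the
 * termination path would never see.
 */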
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 2dbf4066d86f..d517ec9ddcbf 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -141,7 +141,7 @@ query_engine_cycles(struct xe_device *xe,
return -EINVAL;
eci = &resp.eci;
- if (eci->gt_id >= XE_MAX_GT_PER_TILE)
+ if (eci->gt_id >= xe->info.max_gt_per_tile)
return -EINVAL;
gt = xe_device_get_gt(xe, eci->gt_id);
@@ -368,6 +368,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
struct drm_xe_query_gt_list __user *query_ptr =
u64_to_user_ptr(query->data);
struct drm_xe_query_gt_list *gt_list;
+ int iter = 0;
u8 id;
if (query->size == 0) {
@@ -385,12 +386,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
for_each_gt(gt, xe, id) {
if (xe_gt_is_media_type(gt))
- gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+ gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
else
- gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
- gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
- gt_list->gt_list[id].gt_id = gt->info.id;
- gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
+ gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+ gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
+ gt_list->gt_list[iter].gt_id = gt->info.id;
+ gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
/*
* The mem_regions indexes in the mask below need to
* directly identify the struct
@@ -406,19 +407,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
* assumption.
*/
if (!IS_DGFX(xe))
- gt_list->gt_list[id].near_mem_regions = 0x1;
+ gt_list->gt_list[iter].near_mem_regions = 0x1;
else
- gt_list->gt_list[id].near_mem_regions =
+ gt_list->gt_list[iter].near_mem_regions =
BIT(gt_to_tile(gt)->id) << 1;
- gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
- gt_list->gt_list[id].near_mem_regions;
+ gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
+ gt_list->gt_list[iter].near_mem_regions;
- gt_list->gt_list[id].ip_ver_major =
+ gt_list->gt_list[iter].ip_ver_major =
REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
- gt_list->gt_list[id].ip_ver_minor =
+ gt_list->gt_list[iter].ip_ver_minor =
REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
- gt_list->gt_list[id].ip_ver_rev =
+ gt_list->gt_list[iter].ip_ver_rev =
REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
+
+ iter++;
}
if (copy_to_user(query_ptr, gt_list, size)) {
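The id-to-iter switch is a direct consequence of the sparse GT ids introduced earlier (illustration only):

/*
 * Illustration only -- with sparse gt ids (e.g. ids 0 and 2 when the
 * media gt is absent on a max_gt_per_tile == 2 platform), indexing the
 * uapi array by gt->info.id would leave an uninitialized hole at
 * index 1. The dense 'iter' counter keeps gt_list[] packed while
 * gt_list[iter].gt_id still reports the real, possibly sparse, id.
 */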
@@ -683,8 +686,8 @@ static int query_oa_units(struct xe_device *xe,
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
- DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
-
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
+ DRM_XE_OA_CAPS_OAM;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
if (!xe_hw_engine_is_reserved(hwe) &&
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 29e694bb1219..95571b87aa73 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -56,37 +56,61 @@ static bool rule_matches(const struct xe_device *xe,
xe->info.subplatform == r->subplatform;
break;
case XE_RTP_MATCH_GRAPHICS_VERSION:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 == r->ver_start &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 >= r->ver_start &&
xe->info.graphics_verx100 <= r->ver_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.graphics_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_GRAPHICS_STEP:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.step.graphics >= r->step_start &&
xe->info.step.graphics < r->step_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 == r->ver_start &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_RANGE:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 >= r->ver_start &&
xe->info.media_verx100 <= r->ver_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_STEP:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.step.media >= r->step_start &&
xe->info.step.media < r->step_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = xe->info.media_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_INTEGRATED:
@@ -108,6 +132,9 @@ static bool rule_matches(const struct xe_device *xe,
match = hwe->class != r->engine_class;
break;
case XE_RTP_MATCH_FUNC:
+ if (drm_WARN_ON(&xe->drm, !gt))
+ return false;
+
match = r->match_func(gt, hwe);
break;
default:
@@ -186,6 +213,11 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
struct xe_device **xe)
{
switch (ctx->type) {
+ case XE_RTP_PROCESS_TYPE_DEVICE:
+ *hwe = NULL;
+ *gt = NULL;
+ *xe = ctx->xe;
+ break;
case XE_RTP_PROCESS_TYPE_GT:
*hwe = NULL;
*gt = ctx->gt;
@@ -326,21 +358,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
hwe->engine_id == __ffs(render_compute_mask);
}
-bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
- const struct xe_hw_engine *hwe)
-{
- unsigned int dss_per_gslice = 4;
- unsigned int dss;
-
- if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask),
- "Checking gslice for platform without geometry pipeline\n"))
- return false;
-
- dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0);
-
- return dss >= dss_per_gslice;
-}
-
bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 4fe736a11c42..5ed6c14b9ae3 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -422,7 +422,8 @@ struct xe_reg_sr;
#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
- struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
+ struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT }, \
+ struct xe_device * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_DEVICE })
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
unsigned long *active_entries,
@@ -466,17 +467,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
- * xe_rtp_match_first_gslice_fused_off - Match when first gslice is fused off
- *
- * @gt: GT structure
- * @hwe: Engine instance
- *
- * Returns: true if first gslice is fused off, false otherwise.
- */
-bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
- const struct xe_hw_engine *hwe);
-
-/*
* xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
*
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
index 1b76b947c706..f4cf30e298cf 100644
--- a/drivers/gpu/drm/xe/xe_rtp_types.h
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -110,12 +110,14 @@ struct xe_rtp_entry {
};
enum xe_rtp_process_type {
+ XE_RTP_PROCESS_TYPE_DEVICE,
XE_RTP_PROCESS_TYPE_GT,
XE_RTP_PROCESS_TYPE_ENGINE,
};
struct xe_rtp_process_ctx {
union {
+ struct xe_device *xe;
struct xe_gt *gt;
struct xe_hw_engine *hwe;
};
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 1905ca590965..d21bf8f26964 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -113,7 +113,8 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
kref_init(&job->refcount);
xe_exec_queue_get(job->q);
- err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
+ err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
if (err)
goto err_free;
@@ -216,15 +217,17 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
bool xe_sched_job_started(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_start_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_start_seqno(lrc));
}
bool xe_sched_job_completed(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
/*
@@ -232,9 +235,9 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
* parallel handshake is done.
*/
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_seqno(lrc));
}
void xe_sched_job_arm(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 86d47aaf0358..1c3c04d52f55 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -5,6 +5,7 @@
#include <linux/shrinker.h>
+#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -65,11 +66,15 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
struct ttm_bo_lru_cursor curs;
struct ttm_buffer_object *ttm_bo;
+ struct ttm_lru_walk_arg arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ };
if (!man || !man->use_tt)
continue;
- ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
+ ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, ttm_bo) {
if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
continue;
@@ -81,6 +86,8 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
if (*scanned >= to_scan)
break;
}
+ /* Trylocks should never error, just fail. */
+ xe_assert(xe, !IS_ERR(ttm_bo));
}
return freed;
@@ -213,24 +220,34 @@ static void xe_shrinker_pm(struct work_struct *work)
xe_pm_runtime_put(shrinker->xe);
}
+static void xe_shrinker_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_shrinker *shrinker = arg;
+
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
+
/**
* xe_shrinker_create() - Create an xe per-device shrinker
* @xe: Pointer to the xe device.
*
- * Returns: A pointer to the created shrinker on success,
- * Negative error code on failure.
+ * Return: %0 on success. Negative error code on failure.
*/
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+int xe_shrinker_create(struct xe_device *xe)
{
struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
if (!shrinker)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
@@ -240,19 +257,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
shrinker->shrink->scan_objects = xe_shrinker_scan;
shrinker->shrink->private_data = shrinker;
shrinker_register(shrinker->shrink);
+ xe->mem.shrinker = shrinker;
- return shrinker;
-}
-
-/**
- * xe_shrinker_destroy() - Destroy an xe per-device shrinker
- * @shrinker: Pointer to the shrinker to destroy.
- */
-void xe_shrinker_destroy(struct xe_shrinker *shrinker)
-{
- xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
- xe_assert(shrinker->xe, !shrinker->purgeable_pages);
- shrinker_free(shrinker->shrink);
- flush_work(&shrinker->pm_worker);
- kfree(shrinker);
+ return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker);
}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
index 28a038f4fcbf..5132ae5192e1 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.h
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -11,8 +11,6 @@ struct xe_device;
void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
-
-void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+int xe_shrinker_create(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index 0f721ae17b26..afbdd894bd6e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -3,6 +3,8 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
#include "xe_assert.h"
@@ -10,6 +12,8 @@
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_service.h"
#include "xe_sriov_printk.h"
static unsigned int wanted_max_vfs(struct xe_device *xe)
@@ -80,9 +84,22 @@ bool xe_sriov_pf_readiness(struct xe_device *xe)
*/
int xe_sriov_pf_init_early(struct xe_device *xe)
{
+ int err;
+
xe_assert(xe, IS_SRIOV_PF(xe));
- return drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+ xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe),
+ sizeof(*xe->sriov.pf.vfs), GFP_KERNEL);
+ if (!xe->sriov.pf.vfs)
+ return -ENOMEM;
+
+ err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+ if (err)
+ return err;
+
+ xe_sriov_pf_service_init(xe);
+
+ return 0;
}
/**
@@ -102,3 +119,45 @@ void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}
+
+static int simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_device *xe = parent->d_inode->i_private;
+ void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
+
+ print(xe, &p);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
+ { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
+};
+
+/**
+ * xe_sriov_pf_debugfs_register - Register PF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Prepare debugfs attributes exposed by the PF.
+ */
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *parent;
+
+ /*
+ * /sys/kernel/debug/dri/0/
+ * ├── pf
+ * │   ├── ...
+ */
+ parent = debugfs_create_dir("pf", root);
+ if (IS_ERR(parent))
+ return;
+ parent->d_inode->i_private = xe;
+
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index d1220e70e1c0..c392c3fcf085 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -8,12 +8,14 @@
#include <linux/types.h>
+struct dentry;
struct drm_printer;
struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
@@ -25,6 +27,10 @@ static inline int xe_sriov_pf_init_early(struct xe_device *xe)
{
return 0;
}
+
+static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+}
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_sriov_pf_service.c
new file mode 100644
index 000000000000..eee3b2a1ba41
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#include "abi/guc_relay_actions_abi.h"
+
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_printk.h"
+
+#include "xe_sriov_pf_service.h"
+#include "xe_sriov_pf_service_types.h"
+
+/**
+ * xe_sriov_pf_service_init - Early initialization of the SR-IOV PF service.
+ * @xe: the &xe_device to initialize
+ *
+ * Performs early initialization of the SR-IOV PF service.
+ *
+ * This function can only be called on PF.
+ */
+void xe_sriov_pf_service_init(struct xe_device *xe)
+{
+ BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
+ BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* base versions may differ between platforms */
+ xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
+ xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
+
+ /* latest version is the same for all platforms */
+ xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
+ xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
+}
+
+/* Return: 0 on success or a negative error code on failure. */
+static int pf_negotiate_version(struct xe_device *xe,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base;
+ struct xe_sriov_pf_service_version latest = xe->sriov.pf.service.version.latest;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, base.major);
+ xe_assert(xe, base.major <= latest.major);
+ xe_assert(xe, (base.major < latest.major) || (base.minor <= latest.minor));
+
+ /* VF doesn't care - return our latest */
+ if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
+ wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants newer than ours - return our latest */
+ if (wanted_major > latest.major) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants older than the minimum required - reject */
+ if (wanted_major < base.major ||
+ (wanted_major == base.major && wanted_minor < base.minor)) {
+ return -EPERM;
+ }
+
+ /* previous major - return wanted, as we should still support it */
+ if (wanted_major < latest.major) {
+ /* XXX: we are not prepared for multi-versions yet */
+ xe_assert(xe, base.major == latest.major);
+ return -ENOPKG;
+ }
+
+ /* same major - return common minor */
+ *major = wanted_major;
+ *minor = min_t(u32, latest.minor, wanted_minor);
+ return 0;
+}
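A worked example of the negotiation rules (made-up version numbers, illustration only):

/*
 * Illustration only -- assuming base == 4.1 and latest == 4.3:
 *
 *   wanted ANY.ANY -> 4.3     (VF doesn't care, return our latest)
 *   wanted 5.0     -> 4.3     (newer major than ours, cap to latest)
 *   wanted 4.0     -> -EPERM  (below the base minimum)
 *   wanted 4.2     -> 4.2     (same major, common minor)
 *   wanted 4.9     -> 4.3     (same major, minor capped to latest)
 */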
+
+static void pf_connect(struct xe_device *xe, u32 vfid, u32 major, u32 minor)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+ xe_assert(xe, major || minor);
+
+ xe->sriov.pf.vfs[vfid].version.major = major;
+ xe->sriov.pf.vfs[vfid].version.minor = minor;
+}
+
+static void pf_disconnect(struct xe_device *xe, u32 vfid)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ xe->sriov.pf.vfs[vfid].version.major = 0;
+ xe->sriov.pf.vfs[vfid].version.minor = 0;
+}
+
+/**
+ * xe_sriov_pf_service_is_negotiated - Check if VF has negotiated given ABI version.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @major: the major version to check
+ * @minor: the minor version to check
+ *
+ * Performs early initialization of the SR-IOV PF service.
+ *
+ * This function can only be called on PF.
+ *
+ * Returns: true if the VF can use the given ABI version functionality.
+ */
+bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor)
+{
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ return major == xe->sriov.pf.vfs[vfid].version.major &&
+ minor <= xe->sriov.pf.vfs[vfid].version.minor;
+}
+
+/**
+ * xe_sriov_pf_service_handshake_vf - Confirm a connection with the VF.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @wanted_major: the major service version expected by the VF
+ * @wanted_minor: the minor service version expected by the VF
+ * @major: the major service version to be used by the VF
+ * @minor: the minor service version to be used by the VF
+ *
+ * Negotiate a VF/PF ABI version to allow the VF to use the PF services.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ int err;
+
+ xe_sriov_dbg_verbose(xe, "VF%u wants ABI version %u.%u\n",
+ vfid, wanted_major, wanted_minor);
+
+ err = pf_negotiate_version(xe, wanted_major, wanted_minor, major, minor);
+
+ if (err < 0) {
+ xe_sriov_notice(xe, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
+ vfid, wanted_major, wanted_minor, ERR_PTR(err));
+ pf_disconnect(xe, vfid);
+ } else {
+ xe_sriov_dbg(xe, "VF%u negotiated ABI version %u.%u\n",
+ vfid, *major, *minor);
+ pf_connect(xe, vfid, *major, *minor);
+ }
+
+ return err;
+}
+
+/**
+ * xe_sriov_pf_service_reset_vf - Reset a connection with the VF.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Reset a VF driver negotiated VF/PF ABI version.
+ *
+ * After that point, the VF driver will have to perform a new version
+ * handshake to continue using the PF services.
+ *
+ * This function can only be called on PF.
+ */
+void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid)
+{
+ pf_disconnect(xe, vfid);
+}
+
+static void print_pf_version(struct drm_printer *p, const char *name,
+ const struct xe_sriov_pf_service_version *version)
+{
+ drm_printf(p, "%s:\t%u.%u\n", name, version->major, version->minor);
+}
+
+/**
+ * xe_sriov_pf_service_print_versions - Print ABI versions negotiated with VFs.
+ * @xe: the &xe_device
+ * @p: the &drm_printer
+ *
+ * This function is for PF use only.
+ */
+void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+ struct xe_sriov_pf_service_version *version;
+ char name[8];
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ print_pf_version(p, "base", &xe->sriov.pf.service.version.base);
+ print_pf_version(p, "latest", &xe->sriov.pf.service.version.latest);
+
+ for (n = 1; n <= total_vfs; n++) {
+ version = &xe->sriov.pf.vfs[n].version;
+ if (!version->major && !version->minor)
+ continue;
+
+ print_pf_version(p, xe_sriov_function_name(n, name, sizeof(name)), version);
+ }
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_sriov_pf_service_kunit.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_sriov_pf_service.h
new file mode 100644
index 000000000000..d38c18f5ed10
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SERVICE_H_
+#define _XE_SRIOV_PF_SERVICE_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_device;
+
+void xe_sriov_pf_service_init(struct xe_device *xe);
+void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p);
+
+int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor);
+bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor);
+void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h
new file mode 100644
index 000000000000..0835dde358c1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SERVICE_TYPES_H_
+#define _XE_SRIOV_PF_SERVICE_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_sriov_pf_service_version - VF/PF ABI Version.
+ * @major: the major version of the VF/PF ABI
+ * @minor: the minor version of the VF/PF ABI
+ *
+ * See `GuC Relay Communication`_.
+ */
+struct xe_sriov_pf_service_version {
+ u16 major;
+ u16 minor;
+};
+
+/**
+ * struct xe_sriov_pf_service - Data used by the PF service.
+ * @version: information about VF/PF ABI versions for current platform.
+ * @version.base: lowest VF/PF ABI version that could be negotiated with VF.
+ * @version.latest: latest VF/PF ABI version supported by the PF driver.
+ */
+struct xe_sriov_pf_service {
+ struct {
+ struct xe_sriov_pf_service_version base;
+ struct xe_sriov_pf_service_version latest;
+ } version;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
new file mode 100644
index 000000000000..956a88f9f213
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_TYPES_H_
+#define _XE_SRIOV_PF_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "xe_sriov_pf_service_types.h"
+
+/**
+ * struct xe_sriov_metadata - per-VF device level metadata
+ */
+struct xe_sriov_metadata {
+ /** @version: negotiated VF/PF ABI version */
+ struct xe_sriov_pf_service_version version;
+};
+
+/**
+ * struct xe_device_pf - Xe PF related data
+ *
+ * The data in this structure is valid only if the driver is running in the
+ * @XE_SRIOV_MODE_PF mode.
+ */
+struct xe_device_pf {
+ /** @device_total_vfs: Maximum number of VFs supported by the device. */
+ u16 device_total_vfs;
+
+ /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
+ u16 driver_max_vfs;
+
+ /** @master_lock: protects all VFs configurations across GTs */
+ struct mutex master_lock;
+
+ /** @service: device level service data. */
+ struct xe_sriov_pf_service service;
+
+ /** @vfs: metadata for all VFs. */
+ struct xe_sriov_metadata *vfs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index ca94382a721e..1a138108d139 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -7,9 +7,6 @@
#define _XE_SRIOV_TYPES_H_
#include <linux/build_bug.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -40,37 +37,4 @@ enum xe_sriov_mode {
};
static_assert(XE_SRIOV_MODE_NONE);
-/**
- * struct xe_device_pf - Xe PF related data
- *
- * The data in this structure is valid only if driver is running in the
- * @XE_SRIOV_MODE_PF mode.
- */
-struct xe_device_pf {
- /** @device_total_vfs: Maximum number of VFs supported by the device. */
- u16 device_total_vfs;
-
- /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
- u16 driver_max_vfs;
-
- /** @master_lock: protects all VFs configurations across GTs */
- struct mutex master_lock;
-};
-
-/**
- * struct xe_device_vf - Xe Virtual Function related data
- *
- * The data in this structure is valid only if driver is running in the
- * @XE_SRIOV_MODE_VF mode.
- */
-struct xe_device_vf {
- /** @migration: VF Migration state data */
- struct {
- /** @migration.worker: VF migration recovery worker */
- struct work_struct worker;
- /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
- unsigned long gt_flags;
- } migration;
-};
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index c1275e64aa9c..26e243c28994 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -7,12 +7,15 @@
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_guc_ct.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
+#include "xe_tile_sriov_vf.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -121,6 +124,15 @@
* | | |
*/
+static bool vf_migration_supported(struct xe_device *xe)
+{
+ /*
+ * TODO: Add conditions to allow specific platforms, when they're
+ * supported at production quality.
+ */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
static void migration_worker_func(struct work_struct *w);
/**
@@ -130,86 +142,118 @@ static void migration_worker_func(struct work_struct *w);
void xe_sriov_vf_init_early(struct xe_device *xe)
{
INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+
+ if (!vf_migration_supported(xe))
+ xe_sriov_info(xe, "migration not supported by this module version\n");
}
-/**
- * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+static bool gt_vf_post_migration_needed(struct xe_gt *gt)
+{
+ return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);
+}
+
+/*
+ * Notify GuCs marked in flags that resource fixups have been applied.
* @xe: the &xe_device struct instance
- *
- * After migration, we need to re-query all VF configuration to make sure
- * they match previous provisioning. Note that most of VF provisioning
- * shall be the same, except GGTT range, since GGTT is not virtualized per-VF.
- *
- * Returns: 0 if the operation completed successfully, or a negative error
- * code otherwise.
+ * @gt_flags: flags marking to which GTs the notification shall be sent
*/
-static int vf_post_migration_requery_guc(struct xe_device *xe)
+static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags)
{
struct xe_gt *gt;
unsigned int id;
- int err, ret = 0;
+ int err = 0;
for_each_gt(gt, xe, id) {
- err = xe_gt_sriov_vf_query_config(gt);
- ret = ret ?: err;
+ if (!test_bit(id, &gt_flags))
+ continue;
+ /* skip asking GuC for RESFIX exit if new recovery request arrived */
+ if (gt_vf_post_migration_needed(gt))
+ continue;
+ err = xe_gt_sriov_vf_notify_resfix_done(gt);
+ if (err)
+ break;
+ clear_bit(id, &gt_flags);
}
- return ret;
-}
-
-/*
- * vf_post_migration_imminent - Check if post-restore recovery is coming.
- * @xe: the &xe_device struct instance
- *
- * Return: True if migration recovery worker will soon be running. Any worker currently
- * executing does not affect the result.
- */
-static bool vf_post_migration_imminent(struct xe_device *xe)
-{
- return xe->sriov.vf.migration.gt_flags != 0 ||
- work_pending(&xe->sriov.vf.migration.worker);
+ if (gt_flags && !err)
+ drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n");
+ return err;
}
-/*
- * Notify all GuCs about resource fixups apply finished.
- */
-static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+static int vf_get_next_migrated_gt_id(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
for_each_gt(gt, xe, id) {
- if (vf_post_migration_imminent(xe))
- goto skip;
- xe_gt_sriov_vf_notify_resfix_done(gt);
+ if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
+ return id;
}
- return;
+ return -1;
+}
-skip:
- drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+/**
+ * gt_vf_post_migration_fixups - Perform post-migration fixups on a single GT.
+ * @gt: the &xe_gt struct instance
+ *
+ * After migration, GuC needs to be re-queried for VF configuration to check
+ * if it matches previous provisioning. Most of VF provisioning shall be the
+ * same, except GGTT range, since GGTT is not virtualized per-VF. If GGTT
+ * range has changed, we have to perform fixups - shift all GGTT references
+ * used anywhere within the driver. Once the fixups in this function succeed,
+ * the GuC bound to this GT may be asked to resume normal operation.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int gt_vf_post_migration_fixups(struct xe_gt *gt)
+{
+ s64 shift;
+ int err;
+
+ err = xe_gt_sriov_vf_query_config(gt);
+ if (err)
+ return err;
+
+ shift = xe_gt_sriov_vf_ggtt_shift(gt);
+ if (shift) {
+ xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
+ /* FIXME: add the recovery steps */
+ xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
+ }
+ return 0;
}
static void vf_post_migration_recovery(struct xe_device *xe)
{
- int err;
+ unsigned long fixed_gts = 0;
+ int id, err;
drm_dbg(&xe->drm, "migration recovery in progress\n");
xe_pm_runtime_get(xe);
- err = vf_post_migration_requery_guc(xe);
- if (vf_post_migration_imminent(xe))
- goto defer;
- if (unlikely(err))
+
+ if (!vf_migration_supported(xe)) {
+ xe_sriov_err(xe, "migration not supported by this module version\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+
+ while (id = vf_get_next_migrated_gt_id(xe), id >= 0) {
+ struct xe_gt *gt = xe_device_get_gt(xe, id);
+
+ err = gt_vf_post_migration_fixups(gt);
+ if (err)
+ goto fail;
+
+ set_bit(id, &fixed_gts);
+ }
+
+ err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
+ if (err)
goto fail;
- /* FIXME: add the recovery steps */
- vf_post_migration_notify_resfix_done(xe);
xe_pm_runtime_put(xe);
drm_notice(&xe->drm, "migration recovery ended\n");
return;
-defer:
- xe_pm_runtime_put(xe);
- drm_dbg(&xe->drm, "migration recovery deferred\n");
- return;
fail:
xe_pm_runtime_put(xe);
drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
@@ -224,18 +268,23 @@ static void migration_worker_func(struct work_struct *w)
vf_post_migration_recovery(xe);
}
-static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+/*
+ * Check if post-restore recovery is pending on any of the GTs.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if migration recovery worker will soon be running. Any worker currently
+ * executing does not affect the result.
+ */
+static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
for_each_gt(gt, xe, id) {
- if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
- xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
- return false;
- }
+ if (test_bit(id, &xe->sriov.vf.migration.gt_flags))
+ return true;
}
- return true;
+ return false;
}
/**
@@ -250,13 +299,9 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
xe_assert(xe, IS_SRIOV_VF(xe));
- if (!vf_ready_to_recovery_on_all_gts(xe))
+ if (!vf_ready_to_recovery_on_any_gts(xe))
return;
- WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
- /* Ensure other threads see that no flags are set now. */
- smp_mb();
-
started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
drm_info(&xe->drm, "VF migration recovery %s\n", started ?
"scheduled" : "already in progress");
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
new file mode 100644
index 000000000000..8300416a6226
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_TYPES_H_
+#define _XE_SRIOV_VF_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/workqueue_types.h>
+
+/**
+ * struct xe_sriov_vf_relay_version - PF ABI version details.
+ */
+struct xe_sriov_vf_relay_version {
+ /** @major: major version. */
+ u16 major;
+ /** @minor: minor version. */
+ u16 minor;
+};
+
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if the driver is running in the
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @pf_version: negotiated VF/PF ABI version. */
+ struct xe_sriov_vf_relay_version pf_version;
+
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c
index c77b5c317fa0..10e88f2c9615 100644
--- a/drivers/gpu/drm/xe/xe_step.c
+++ b/drivers/gpu/drm/xe/xe_step.c
@@ -5,6 +5,7 @@
#include "xe_step.h"
+#include <kunit/visibility.h>
#include <linux/bitfield.h>
#include "xe_device.h"
@@ -255,3 +256,4 @@ const char *xe_step_name(enum xe_step step)
return "**";
}
}
+EXPORT_SYMBOL_IF_KUNIT(xe_step_name);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index 1f710b3fc599..41705f5d52e3 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -14,6 +14,7 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_heci_gsc.h"
+#include "xe_i2c.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
#include "xe_vsec.h"
@@ -173,20 +174,22 @@ static int enable_survivability_mode(struct pci_dev *pdev)
survivability->mode = true;
ret = xe_heci_gsc_init(xe);
- if (ret) {
- /*
- * But if it fails, device can't enter survivability
- * so move it back for correct error handling
- */
- survivability->mode = false;
- return ret;
- }
+ if (ret)
+ goto err;
xe_vsec_init(xe);
+ ret = xe_i2c_probe(xe);
+ if (ret)
+ goto err;
+
dev_err(dev, "In Survivability Mode\n");
return 0;
+
+err:
+ survivability->mode = false;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index f0b167b3fb6a..a7ff5975873f 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -3,13 +3,17 @@
* Copyright © 2024 Intel Corporation
*/
+#include <drm/drm_drv.h>
+
#include "xe_bo.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
+#include "xe_pm.h"
#include "xe_pt.h"
#include "xe_svm.h"
+#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
@@ -45,21 +49,6 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-static unsigned long xe_svm_range_start(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_start(&range->base);
-}
-
-static unsigned long xe_svm_range_end(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_end(&range->base);
-}
-
-static unsigned long xe_svm_range_size(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_size(&range->base);
-}
-
#define range_debug(r__, operaton__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
@@ -103,11 +92,6 @@ static void xe_svm_range_free(struct drm_gpusvm_range *range)
kfree(range);
}
-static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
-{
- return container_of(r, struct xe_svm_range, base);
-}
-
static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
const struct mmu_notifier_range *mmu_range)
@@ -161,7 +145,12 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
tile_mask |= BIT(id);
- range->tile_invalidated |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
}
return tile_mask;
@@ -187,14 +176,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
{
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
- struct xe_tile *tile;
struct drm_gpusvm_range *r, *first;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
u8 tile_mask = 0;
- u8 id;
- u32 fence_id = 0;
long err;
xe_svm_assert_in_notifier(vm);
@@ -240,42 +224,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- for_each_tile(tile, xe, id) {
- if (tile_mask & BIT(id)) {
- int err;
-
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
@@ -349,7 +299,7 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
up_write(&vm->lock);
}
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *page_to_vr(struct page *page)
{
@@ -537,16 +487,18 @@ static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
}
-static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
+static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
{
return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
}
-static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
+static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct xe_device *xe = xe_bo_device(bo);
xe_bo_put_async(bo);
+ xe_pm_runtime_put(xe);
}
static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
@@ -559,7 +511,7 @@ static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
return &tile->mem.vram.ttm.mm;
}
-static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
+static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
unsigned long npages, unsigned long *pfn)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
@@ -582,7 +534,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocatio
return 0;
}
-static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
+static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
.devmem_release = xe_svm_devmem_release,
.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
.copy_to_devmem = xe_svm_copy_to_devmem,
@@ -662,84 +614,140 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
struct xe_tile *tile,
bool devmem_only)
{
- /*
- * Advisory only check whether the range currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
- */
- return ((READ_ONCE(range->tile_present) &
- ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
- (!devmem_only || xe_svm_range_in_vram(range));
+ return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) &&
+ (!devmem_only || xe_svm_range_in_vram(range)));
+}
+
+/**
+ * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ *
+ * The xe_svm_range_migrate_to_smem() function checks if the range has pages
+ * in VRAM and, if so, migrates them back to SMEM.
+ */
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+ if (xe_svm_range_in_vram(range))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+}
+
+/**
+ * xe_svm_range_validate() - Check if the SVM range is valid
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ * @tile_mask: Mask representing the tiles to be checked
+ * @devmem_preferred: if true, the range needs to be in devmem
+ *
+ * The xe_svm_range_validate() function checks if a range is
+ * valid and located in the desired memory region.
+ *
+ * Return: true if the range is valid, false otherwise
+ */
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ bool ret;
+
+ xe_svm_notifier_lock(vm);
+
+ ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
+ (devmem_preferred == range->base.flags.has_devmem_pages);
+
+ xe_svm_notifier_unlock(vm);
+
+ return ret;
+}
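The bitmask test above is the core of the check: every tile requested in @tile_mask must be present and not invalidated. A minimal sketch of its semantics (illustrative helper only):

static bool example_tiles_valid(u8 tile_present, u8 tile_invalidated, u8 tile_mask)
{
	/* e.g. present=0b11, invalidated=0b10, mask=0b01 -> valid */
	return (tile_present & ~tile_invalidated & tile_mask) == tile_mask;
}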
+
+/**
+ * xe_svm_find_vma_start - Find start of CPU VMA
+ * @vm: xe_vm pointer
+ * @start: start address
+ * @end: end address
+ * @vma: Pointer to struct xe_vma
+ *
+ * This function searches for a CPU VMA within the specified range
+ * [start, end] in the given VM. It clamps the range to the xe_vma start
+ * and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
+ *
+ * Return: The starting address of the VMA within the range,
+ * or ULONG_MAX if no VMA is found
+ */
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
+{
+ return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
+ max(start, xe_vma_start(vma)),
+ min(end, xe_vma_end(vma)));
}
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
return &tile->mem.vram;
}
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
{
- struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = xe->drm.dev;
struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
struct list_head *blocks;
struct xe_bo *bo;
- ktime_t end = 0;
- int err;
+ ktime_t time_end = 0;
+ int err, idx;
- range_debug(range, "ALLOCATE VRAM");
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return -ENODEV;
- if (!mmget_not_zero(mm))
- return -EFAULT;
- mmap_read_lock(mm);
+ xe_pm_runtime_get(xe);
-retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
- xe_svm_range_size(range),
+ retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
ttm_bo_type_device,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_CPU_ADDR_MIRROR);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
+ if (xe_vm_validate_should_retry(NULL, err, &time_end))
goto retry;
- goto unlock;
+ goto out_pm_put;
}
- drm_gpusvm_devmem_init(&bo->devmem_allocation,
- vm->xe->drm.dev, mm,
- &gpusvm_devmem_ops,
- &tile->mem.vram.dpagemap,
- xe_svm_range_size(range));
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops,
+ &tile->mem.vram.dpagemap,
+ end - start);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
block->private = vr;
xe_bo_get(bo);
- err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
- &bo->devmem_allocation, ctx);
+
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
if (err)
xe_svm_devmem_release(&bo->devmem_allocation);
xe_bo_unlock(bo);
xe_bo_put(bo);
-unlock:
- mmap_read_unlock(mm);
- mmput(mm);
+out_pm_put:
+ xe_pm_runtime_put(xe);
+ drm_dev_exit(idx);
return err;
}
-#else
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
-{
- return -EOPNOTSUPP;
-}
#endif
static bool supports_4K_migration(struct xe_device *xe)
@@ -750,21 +758,31 @@ static bool supports_4K_migration(struct xe_device *xe)
return true;
}
-static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
- struct xe_vma *vma)
+/**
+ * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @preferred_region_is_vram: preferred region for range is vram
+ *
+ * Return: True if the range needs migration and migration is supported, false otherwise
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
{
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem)
+ if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
return false;
- if (xe_svm_range_in_vram(range)) {
- drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+ if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -792,20 +810,19 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
.devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
.check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
.devmem_only = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ vm->xe->atomic_svm_timeslice_ms : 0,
};
struct xe_svm_range *range;
- struct drm_gpusvm_range *r;
- struct drm_exec exec;
struct dma_fence *fence;
- int migrate_try_count = ctx.devmem_only ? 3 : 1;
struct xe_tile *tile = gt_to_tile(gt);
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
ktime_t end = 0;
int err;
@@ -820,24 +837,22 @@ retry:
if (err)
return err;
- r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
- xe_vma_start(vma), xe_vma_end(vma),
- &ctx);
- if (IS_ERR(r))
- return PTR_ERR(r);
+ range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
- if (ctx.devmem_only && !r->flags.migrate_devmem)
+ if (IS_ERR(range))
+ return PTR_ERR(range);
+
+ if (ctx.devmem_only && !range->base.flags.migrate_devmem)
return -EACCES;
- range = to_xe_range(r);
if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma)) {
- err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ err = xe_svm_alloc_vram(tile, range, &ctx);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
if (migrate_try_count || !ctx.devmem_only) {
@@ -855,16 +870,11 @@ retry:
}
range_debug(range, "GET PAGES");
- err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (migrate_try_count > 0 || !ctx.devmem_only) {
- if (err == -EOPNOTSUPP) {
- range_debug(range, "PAGE FAULT - EVICT PAGES");
- drm_gpusvm_range_evict(&vm->svm.gpusvm,
- &range->base);
- }
drm_dbg(&vm->xe->drm,
"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
@@ -884,30 +894,21 @@ retry:
range_debug(range, "PAGE FAULT - BIND");
retry_bind:
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
- drm_exec_retry_on_contention(&exec);
- if (err) {
- drm_exec_fini(&exec);
- goto err_out;
- }
-
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- drm_exec_fini(&exec);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
- }
- if (xe_vm_validate_should_retry(&exec, err, &end))
- goto retry_bind;
- goto err_out;
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ xe_vm_unlock(vm);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
}
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry_bind;
+ goto err_out;
}
- drm_exec_fini(&exec);
+ xe_vm_unlock(vm);
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -943,10 +944,84 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
*/
int xe_svm_bo_evict(struct xe_bo *bo)
{
- return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+ return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
+}
+
+/**
+ * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
+ * @vm: xe_vm pointer
+ * @addr: address for which range needs to be found/inserted
+ * @vma: Pointer to struct xe_vma which mirrors CPU
+ * @ctx: GPU SVM context
+ *
+ * This function finds or inserts a newly allocated SVM range based on the
+ * address.
+ *
+ * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
+ */
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_range *r;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
+ xe_vma_start(vma), xe_vma_end(vma), ctx);
+ if (IS_ERR(r))
+ return ERR_CAST(r);
+
+ return to_xe_range(r);
+}
+
+/**
+ * xe_svm_range_get_pages() - Get pages for a SVM range
+ * @vm: Pointer to the struct xe_vm
+ * @range: Pointer to the xe SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a SVM range and ensures they are mapped for
+ * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ int err = 0;
+
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
+
+ return err;
}
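A condensed sketch of how these two helpers chain in the fault path, mirroring the xe_svm_handle_pagefault() changes above (retries, migration, and binding omitted):

static int example_fault_path(struct xe_vm *vm, struct xe_vma *vma,
			      u64 fault_addr, struct drm_gpusvm_ctx *ctx)
{
	struct xe_svm_range *range;

	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, ctx);
	if (IS_ERR(range))
		return PTR_ERR(range);

	/* evicts the range internally on -EOPNOTSUPP before returning */
	return xe_svm_range_get_pages(vm, range, ctx);
}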
-#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+
+/**
+ * xe_svm_alloc_vram() - Allocate device memory pages for range,
+ * migrating existing data.
+ * @tile: tile to allocate vram from
+ * @range: SVM range
+ * @ctx: DRM GPU SVM context
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_pagemap *dpagemap;
+
+ xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ range_debug(range, "ALLOCATE VRAM");
+
+ dpagemap = xe_tile_local_pagemap(tile);
+ return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
+ xe_svm_range_end(range),
+ range->base.gpusvm->mm,
+ ctx->timeslice_ms);
+}
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
@@ -972,6 +1047,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
.device_map = xe_drm_pagemap_device_map,
+ .populate_mm = xe_drm_pagemap_populate_mm,
};
/**
@@ -1003,7 +1079,7 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
vr->pagemap.range.start = res->start;
vr->pagemap.range.end = res->end;
vr->pagemap.nr_range = 1;
- vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
vr->pagemap.owner = xe_svm_devm_owner(xe);
addr = devm_memremap_pages(dev, &vr->pagemap);
@@ -1024,6 +1100,13 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#else
+int xe_svm_alloc_vram(struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 30fc78b85b30..da9a69ea0bb1 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -70,6 +70,26 @@ int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
+
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx);
+
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram);
+
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
+
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred);
+
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -82,6 +102,53 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
return range->base.flags.has_dma_mapping;
}
+/**
+ * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
+ * @r: Pointer to the drm_gpusvm_range structure
+ *
+ * This function takes a pointer to a drm_gpusvm_range structure and
+ * converts it to a pointer to the containing xe_svm_range structure.
+ *
+ * Return: Pointer to the xe_svm_range structure
+ */
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+/**
+ * xe_svm_range_start() - SVM range start address
+ * @range: SVM range
+ *
+ * Return: start address of range.
+ */
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+/**
+ * xe_svm_range_end() - SVM range end address
+ * @range: SVM range
+ *
+ * Return: end address of range.
+ */
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+/**
+ * xe_svm_range_size() - SVM range size
+ * @range: SVM range
+ *
+ * Return: Size of range.
+ */
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
#define xe_svm_assert_in_notifier(vm__) \
lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
@@ -97,6 +164,8 @@ void xe_svm_flush(struct xe_vm *vm);
#include <linux/interval_tree.h>
struct drm_pagemap_device_addr;
+struct drm_gpusvm_ctx;
+struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
@@ -167,6 +236,73 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
+static inline int
+xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ return -EINVAL;
+}
+
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return NULL;
+}
+
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
+{
+ return false;
+}
+
+static inline
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+}
+
+static inline
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ return false;
+}
+
+static inline
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
+{
+ return ULONG_MAX;
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0771acbbf367..86e9811e60ba 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -10,6 +10,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_memirq.h"
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
@@ -87,13 +88,9 @@
*/
static int xe_tile_alloc(struct xe_tile *tile)
{
- struct drm_device *drm = &tile_to_xe(tile)->drm;
-
- tile->mem.ggtt = drmm_kzalloc(drm, sizeof(*tile->mem.ggtt),
- GFP_KERNEL);
+ tile->mem.ggtt = xe_ggtt_alloc(tile);
if (!tile->mem.ggtt)
return -ENOMEM;
- tile->mem.ggtt->tile = tile;
return 0;
}
@@ -178,6 +175,12 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
int xe_tile_init(struct xe_tile *tile)
{
+ int err;
+
+ err = xe_memirq_init(&tile->memirq);
+ if (err)
+ return err;
+
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
return PTR_ERR(tile->mem.kernel_bb_pool);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index eb939316d55b..cc33e8733983 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -16,4 +16,21 @@ int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram.dpagemap;
+}
+#else
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return NULL;
+}
+#endif
+
+static inline bool xe_tile_is_root(struct xe_tile *tile)
+{
+ return tile->id == 0;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
new file mode 100644
index 000000000000..f221dbed16f0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gtt_defs.h"
+
+#include "xe_assert.h"
+#include "xe_ggtt.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_wopcm.h"
+
+static int vf_init_ggtt_balloons(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
+
+ tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_tile_sriov_vf_balloon_ggtt_locked - Insert balloon nodes to limit used GGTT address range.
+ * @tile: the &xe_tile struct instance
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+{
+ u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
+ u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 wopcm = xe_wopcm_size(xe);
+ u64 start, end;
+ int err;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(xe));
+ xe_tile_assert(tile, ggtt_size);
+ lockdep_assert_held(&tile->mem.ggtt->lock);
+
+ /*
+ * VF can only use part of the GGTT as allocated by the PF:
+ *
+ * WOPCM GUC_GGTT_TOP
+ * |<------------ Total GGTT size ------------------>|
+ *
+ * VF GGTT base -->|<- size ->|
+ *
+ * +--------------------+----------+-----------------+
+ * |////////////////////| block |\\\\\\\\\\\\\\\\\|
+ * +--------------------+----------+-----------------+
+ *
+ * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
+ */
+
+ if (ggtt_base < wopcm || ggtt_base > GUC_GGTT_TOP ||
+ ggtt_size > GUC_GGTT_TOP - ggtt_base) {
+ xe_sriov_err(xe, "tile%u: Invalid GGTT configuration: %#llx-%#llx\n",
+ tile->id, ggtt_base, ggtt_base + ggtt_size - 1);
+ return -ERANGE;
+ }
+
+ start = wopcm;
+ end = ggtt_base;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[0],
+ start, end);
+ if (err)
+ return err;
+ }
+
+ start = ggtt_base + ggtt_size;
+ end = GUC_GGTT_TOP;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[1],
+ start, end);
+ if (err) {
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+ return err;
+ }
+ }
+
+ return 0;
+}
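To make the balloon arithmetic concrete, a sketch with made-up numbers (the WOPCM size and VF range below are illustrative; only GUC_GGTT_TOP is a real limit):

struct example_balloon { u64 start, end; };

static void example_balloon_ranges(u64 wopcm, u64 ggtt_base, u64 ggtt_size,
				   struct example_balloon b[2])
{
	b[0].start = wopcm;			/* e.g. 0x80000 */
	b[0].end = ggtt_base;			/* e.g. 0x40000000 */
	b[1].start = ggtt_base + ggtt_size;	/* e.g. 0x80000000 */
	b[1].end = GUC_GGTT_TOP;		/* GuC-addressable limit */
}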
+
+static int vf_balloon_ggtt(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+ int err;
+
+ mutex_lock(&ggtt->lock);
+ err = xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+ mutex_unlock(&ggtt->lock);
+
+ return err;
+}
+
+/**
+ * xe_tile_sriov_vf_deballoon_ggtt_locked - Remove balloon nodes.
+ * @tile: the &xe_tile struct instance
+ */
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void vf_deballoon_ggtt(struct xe_tile *tile)
+{
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ mutex_unlock(&tile->mem.ggtt->lock);
+}
+
+static void vf_fini_ggtt_balloons(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void cleanup_ggtt(struct drm_device *drm, void *arg)
+{
+ struct xe_tile *tile = arg;
+
+ vf_deballoon_ggtt(tile);
+ vf_fini_ggtt_balloons(tile);
+}
+
+/**
+ * xe_tile_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ int err;
+
+ err = vf_init_ggtt_balloons(tile);
+ if (err)
+ return err;
+
+ err = vf_balloon_ggtt(tile);
+ if (err) {
+ vf_fini_ggtt_balloons(tile);
+ return err;
+ }
+
+ return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, tile);
+}
+
+/**
+ * DOC: GGTT nodes shifting during VF post-migration recovery
+ *
+ * The first fixup applied to the VF KMD structures as part of post-migration
+ * recovery is shifting nodes within &xe_ggtt instance. The nodes are moved
+ * from range previously assigned to this VF, into newly provisioned area.
+ * The changes include balloons, which are resized accordingly.
+ *
+ * The balloon nodes are there to eliminate unavailable ranges from use: one
+ * reserves the GGTT area below the range of the current VF, and the other
+ * reserves the area above.
+ *
+ * Below is the GGTT layout of an example VF, with a certain address range
+ * assigned to that VF, and inaccessible areas above and below:
+ *
+ * 0 4GiB
+ * |<--------------------------- Total GGTT size ----------------------------->|
+ * WOPCM GUC_TOP
+ * |<-------------- Area mappable by xe_ggtt instance ---------------->|
+ *
+ * +---+---------------------------------+----------+----------------------+---+
+ * |\\\|/////////////////////////////////| VF mem |//////////////////////|\\\|
+ * +---+---------------------------------+----------+----------------------+---+
+ *
+ * Hardware enforced access rules before migration:
+ *
+ * |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
+ *
+ * GGTT nodes used for tracking allocations:
+ *
+ * |<---------- balloon ------------>|<- nodes->|<----- balloon ------>|
+ *
+ * After the migration, the GGTT area assigned to the VF might have shifted,
+ * either to a lower or to a higher address. But we expect the total size and
+ * extra areas to be identical, as migration can only happen between matching
+ * platforms.
+ * Below is an example of GGTT layout of the VF after migration. Content of the
+ * GGTT for VF has been moved to a new area, and we receive its address from GuC:
+ *
+ * +---+----------------------+----------+---------------------------------+---+
+ * |\\\|//////////////////////| VF mem |/////////////////////////////////|\\\|
+ * +---+----------------------+----------+---------------------------------+---+
+ *
+ * Hardware enforced access rules after migration:
+ *
+ * |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
+ *
+ * So the VF has a new slice of GGTT assigned, and during the migration process
+ * the memory content was copied to that new area. But the &xe_ggtt nodes are
+ * still tracking allocations using the old addresses. The nodes within the
+ * VF-owned area have to be shifted, and the balloon nodes need to be resized
+ * to properly mask out areas not owned by the VF.
+ *
+ * Fixed &xe_ggtt nodes used for tracking allocations:
+ *
+ * |<------ balloon ------>|<- nodes->|<----------- balloon ----------->|
+ *
+ * Due to the use of GPU profiles, we do not expect the old and new GGTT areas
+ * to overlap; but our node shifting will fix addresses properly regardless.
+ */
+
+/**
+ * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * @tile: the &xe_tile struct instance
+ * @shift: the shift value
+ *
+ * Since Global GTT is not virtualized, each VF has an assigned range
+ * within the global space. This range might have changed during migration,
+ * which requires all memory addresses pointing to GGTT to be shifted.
+ */
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ mutex_lock(&ggtt->lock);
+
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ xe_ggtt_shift_nodes_locked(ggtt, shift);
+ xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+
+ mutex_unlock(&ggtt->lock);
+}
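For illustration, assuming the shift reported by xe_gt_sriov_vf_ggtt_shift() is the signed distance between the new and the old VF GGTT base (an assumption, not confirmed by this hunk), nodes move down when the new base is lower and up when it is higher:

static s64 example_ggtt_shift(u64 old_base, u64 new_base)
{
	/* e.g. old 0x40000000, new 0x20000000 -> shift of -0x20000000 */
	return (s64)new_base - (s64)old_base;
}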
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
new file mode 100644
index 000000000000..93eb043171e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_H_
+#define _XE_TILE_SRIOV_VF_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index ccebd5f0878e..86323cf3be2c 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_fast_assign(
__assign_str(dev);
- __entry->size = bo->size;
+ __entry->size = xe_bo_size(bo);
__entry->flags = bo->flags;
__entry->vm = bo->vm;
),
@@ -73,7 +73,7 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
- __entry->size = bo->size;
+ __entry->size = xe_bo_size(bo);
__assign_str(new_placement_name);
__assign_str(old_placement_name);
__assign_str(device_id);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 49ddbda7cdef..828b45b24c23 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -98,6 +98,11 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
+ { XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
+ },
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 3a8751a8b92d..3e0c3af235f2 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -33,6 +33,22 @@ uc_to_xe(struct xe_uc *uc)
}
/* Should be called once at driver load only */
+int xe_uc_init_noalloc(struct xe_uc *uc)
+{
+ int ret;
+
+ ret = xe_guc_init_noalloc(&uc->guc);
+ if (ret)
+ goto err;
+
+ /* HuC and GSC have no early dependencies and will be initialized during xe_uc_init(). */
+ return 0;
+
+err:
+ xe_gt_err(uc_to_gt(uc), "Failed to early initialize uC (%pe)\n", ERR_PTR(ret));
+ return ret;
+}
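The rename split leaves three entry points; a sketch of the assumed driver-load ordering based on the comments in this patch (the wrapper itself is hypothetical):

static int example_uc_bringup(struct xe_uc *uc)
{
	int err;

	err = xe_uc_init_noalloc(uc);	/* early init, no allocations */
	if (err)
		return err;

	err = xe_uc_init(uc);		/* WOPCM init + hwconfig load */
	if (err)
		return err;

	return xe_uc_load_hw(uc);	/* upload / auth firmwares */
}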
+
int xe_uc_init(struct xe_uc *uc)
{
int ret;
@@ -56,15 +72,17 @@ int xe_uc_init(struct xe_uc *uc)
if (!xe_device_uc_enabled(uc_to_xe(uc)))
return 0;
- if (IS_SRIOV_VF(uc_to_xe(uc)))
- return 0;
+ if (!IS_SRIOV_VF(uc_to_xe(uc))) {
+ ret = xe_wopcm_init(&uc->wopcm);
+ if (ret)
+ goto err;
+ }
- ret = xe_wopcm_init(&uc->wopcm);
+ ret = xe_guc_min_load_for_hwconfig(&uc->guc);
if (ret)
goto err;
return 0;
-
err:
xe_gt_err(uc_to_gt(uc), "Failed to initialize uC (%pe)\n", ERR_PTR(ret));
return ret;
@@ -126,28 +144,7 @@ int xe_uc_sanitize_reset(struct xe_uc *uc)
return uc_reset(uc);
}
-/**
- * xe_uc_init_hwconfig - minimally init Uc, read and parse hwconfig
- * @uc: The UC object
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_uc_init_hwconfig(struct xe_uc *uc)
-{
- int ret;
-
- /* GuC submission not enabled, nothing to do */
- if (!xe_device_uc_enabled(uc_to_xe(uc)))
- return 0;
-
- ret = xe_guc_min_load_for_hwconfig(&uc->guc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int vf_uc_init_hw(struct xe_uc *uc)
+static int vf_uc_load_hw(struct xe_uc *uc)
{
int err;
@@ -161,22 +158,30 @@ static int vf_uc_init_hw(struct xe_uc *uc)
err = xe_gt_sriov_vf_connect(uc_to_gt(uc));
if (err)
- return err;
+ goto err_out;
uc->guc.submission_state.enabled = true;
- err = xe_gt_record_default_lrcs(uc_to_gt(uc));
+ err = xe_guc_opt_in_features_enable(&uc->guc);
if (err)
return err;
+ err = xe_gt_record_default_lrcs(uc_to_gt(uc));
+ if (err)
+ goto err_out;
+
return 0;
+
+err_out:
+ xe_guc_sanitize(&uc->guc);
+ return err;
}
/*
* Should be called during driver load, after every GT reset, and after every
* suspend to reload / auth the firmwares.
*/
-int xe_uc_init_hw(struct xe_uc *uc)
+int xe_uc_load_hw(struct xe_uc *uc)
{
int ret;
@@ -185,7 +190,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
return 0;
if (IS_SRIOV_VF(uc_to_xe(uc)))
- return vf_uc_init_hw(uc);
+ return vf_uc_load_hw(uc);
ret = xe_huc_upload(&uc->huc);
if (ret)
@@ -201,15 +206,15 @@ int xe_uc_init_hw(struct xe_uc *uc)
ret = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (ret)
- return ret;
+ goto err_out;
ret = xe_guc_post_load_init(&uc->guc);
if (ret)
- return ret;
+ goto err_out;
ret = xe_guc_pc_start(&uc->guc.pc);
if (ret)
- return ret;
+ goto err_out;
xe_guc_engine_activity_enable_stats(&uc->guc);
@@ -221,11 +226,10 @@ int xe_uc_init_hw(struct xe_uc *uc)
xe_gsc_load_start(&uc->gsc);
return 0;
-}
-int xe_uc_fini_hw(struct xe_uc *uc)
-{
- return xe_uc_sanitize_reset(uc);
+err_out:
+ xe_guc_sanitize(&uc->guc);
+ return ret;
}
int xe_uc_reset_prepare(struct xe_uc *uc)
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index c23e6f5e2514..21c9306098cf 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -8,11 +8,10 @@
struct xe_uc;
+int xe_uc_init_noalloc(struct xe_uc *uc);
int xe_uc_init(struct xe_uc *uc);
-int xe_uc_init_hwconfig(struct xe_uc *uc);
int xe_uc_init_post_hwconfig(struct xe_uc *uc);
-int xe_uc_init_hw(struct xe_uc *uc);
-int xe_uc_fini_hw(struct xe_uc *uc);
+int xe_uc_load_hw(struct xe_uc *uc);
void xe_uc_gucrc_disable(struct xe_uc *uc);
int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index a6612105201a..9bbdde604923 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -16,6 +16,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -114,6 +115,7 @@ struct fw_blobs_by_type {
#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \
fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
@@ -126,6 +128,7 @@ struct fw_blobs_by_type {
fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, no_ver(xe, huc, ptl)) \
fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
@@ -662,11 +665,39 @@ do { \
ver_->major, ver_->minor, ver_->patch); \
} while (0)
+static void uc_fw_vf_override(struct xe_uc_fw *uc_fw)
+{
+ struct xe_uc_fw_version *compat = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version *wanted = &uc_fw->versions.wanted;
+
+ /* Only GuC/HuC are supported */
+ if (uc_fw->type != XE_UC_FW_TYPE_GUC && uc_fw->type != XE_UC_FW_TYPE_HUC)
+ uc_fw->path = NULL;
+
+ /* VF will support only firmwares that driver can autoselect */
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_PRELOADED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+
+ if (!xe_uc_fw_is_supported(uc_fw))
+ return;
+
+ /* PF is doing the loading, so we don't need a path on the VF */
+ uc_fw->path = "Loaded by PF";
+
+ /* The GuC versions are set up during the VF bootstrap */
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ xe_gt_sriov_vf_guc_versions(uc_fw_to_gt(uc_fw), wanted, compat);
+ }
+}
+
static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmware_p)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct drm_printer p = xe_gt_info_printer(gt);
struct device *dev = xe->drm.dev;
- struct drm_printer p = drm_info_printer(dev);
const struct firmware *fw = NULL;
int err;
@@ -675,20 +706,13 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
* before we're looked at the HW caps to see if we have uc support
*/
BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
- xe_assert(xe, !uc_fw->status);
- xe_assert(xe, !uc_fw->path);
+ xe_gt_assert(gt, !uc_fw->status);
+ xe_gt_assert(gt, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
if (IS_SRIOV_VF(xe)) {
- /* Only GuC/HuC are supported */
- if (uc_fw->type != XE_UC_FW_TYPE_GUC &&
- uc_fw->type != XE_UC_FW_TYPE_HUC)
- uc_fw->path = NULL;
- /* VF will support only firmwares that driver can autoselect */
- xe_uc_fw_change_status(uc_fw, uc_fw->path ?
- XE_UC_FIRMWARE_PRELOADED :
- XE_UC_FIRMWARE_NOT_SUPPORTED);
+ uc_fw_vf_override(uc_fw);
return 0;
}
@@ -700,7 +724,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
if (!xe_uc_fw_is_supported(uc_fw)) {
if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
- drm_err(&xe->drm, "No GuC firmware defined for platform\n");
+ xe_gt_err(gt, "No GuC firmware defined for platform\n");
return -ENOENT;
}
return 0;
@@ -709,7 +733,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
/* an empty path means the firmware is disabled */
if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_DISABLED);
- drm_dbg(&xe->drm, "%s disabled", xe_uc_fw_type_repr(uc_fw->type));
+ xe_gt_dbg(gt, "%s disabled\n", xe_uc_fw_type_repr(uc_fw->type));
return 0;
}
@@ -742,10 +766,10 @@ fail:
XE_UC_FIRMWARE_MISSING :
XE_UC_FIRMWARE_ERROR);
- drm_notice(&xe->drm, "%s firmware %s: fetch failed with error %d\n",
- xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- drm_info(&xe->drm, "%s firmware(s) can be downloaded from %s\n",
- xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
+ xe_gt_notice(gt, "%s firmware %s: fetch failed with error %pe\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, ERR_PTR(err));
+ xe_gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
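The switch from %d to %pe with ERR_PTR() makes the log carry a symbolic error name rather than a raw integer. For example (GT log prefix omitted, firmware path illustrative):

/* With err == -ENOENT, the notice above now logs, roughly:
 *   "GuC firmware xe/ptl_guc_70.bin: fetch failed with error -ENOENT"
 * instead of "... fetch failed with error -2".
 */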
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ad3b35a0e6eb..914026015019 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -65,6 +65,8 @@ enum xe_uc_fw_type {
* struct xe_uc_fw_version - Version for XE micro controller firmware
*/
struct xe_uc_fw_version {
+ /** @branch: branch version of the FW (not always available) */
+ u16 branch;
/** @major: major version of the FW */
u16 major;
/** @minor: minor version of the FW */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 861577746929..2035604121e6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -732,7 +732,9 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
+ down_read(&vm->userptr.notifier_lock);
err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->userptr.notifier_lock);
xe_vm_unlock(vm);
if (err)
break;
@@ -798,21 +800,47 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
}
ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
+static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op)
+{
+ struct xe_vma *vma;
+
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma))
+ xa_destroy(&op->prefetch_range.range);
+}
+
+static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return;
+
+ list_for_each_entry(op, &vops->list, link)
+ xe_vma_svm_prefetch_op_fini(op);
+}
+
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
int i;
+ xe_vma_svm_prefetch_ops_fini(vops);
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
kfree(vops->pt_update_ops[i].ops);
}
-static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val)
{
int i;
+ if (!inc_val)
+ return;
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
if (BIT(i) & tile_mask)
- ++vops->pt_update_ops[i].num_ops;
+ vops->pt_update_ops[i].num_ops += inc_val;
}
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
@@ -842,7 +870,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -977,7 +1005,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
xe_vm_populate_range_rebind(op, vma, range, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -1062,7 +1090,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
xe_vm_populate_range_unbind(op, range);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
return 0;
}
@@ -2141,6 +2169,35 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static bool vma_matches(struct xe_vma *vma, u64 page_addr)
+{
+ if (page_addr > xe_vma_end(vma) - 1 ||
+ page_addr + SZ_4K - 1 < xe_vma_start(vma))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_vm_find_vma_by_addr() - Find a VMA by its address
+ *
+ * @vm: the xe_vm the vma belongs to
+ * @page_addr: address to look up
+ *
+ * Return: Pointer to the matching VMA, or NULL if no VMA covers @page_addr.
+ */
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr)
+{
+ struct xe_vma *vma = NULL;
+
+ if (vm->usm.last_fault_vma) { /* Fast lookup */
+ if (vma_matches(vm->usm.last_fault_vma, page_addr))
+ vma = vm->usm.last_fault_vma;
+ }
+ if (!vma)
+ vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
+
+ return vma;
+}
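A minimal caller sketch, assuming a fault-handling path that already holds the required VM locking (hypothetical, for illustration):

/* Hypothetical usage sketch; locking requirements as per the VM code. */
static int handle_fault_addr(struct xe_vm *vm, u64 page_addr)
{
	struct xe_vma *vma = xe_vm_find_vma_by_addr(vm, page_addr);

	if (!vma)
		return -EINVAL;	/* no VMA covers this 4K page */

	/* ... service the fault against vma ... */
	return 0;
}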
+
static const u32 region_to_mem_type[] = {
XE_PL_TT,
XE_PL_VRAM0,
@@ -2221,13 +2278,25 @@ static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
return true;
}
+static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *__op;
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ xe_vma_svm_prefetch_op_fini(op);
+ }
+}
+
/*
* Create operations list from IOCTL arguments, and set up operation fields so the
* parse and commit steps are decoupled from IOCTL arguments. This step can fail.
*/
static struct drm_gpuva_ops *
-vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
- u64 bo_offset_or_userptr, u64 addr, u64 range,
+vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
+ struct xe_bo *bo, u64 bo_offset_or_userptr,
+ u64 addr, u64 range,
u32 operation, u32 flags,
u32 prefetch_region, u16 pat_index)
{
@@ -2235,6 +2304,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
struct drm_gpuvm_bo *vm_bo;
+ u64 range_end = addr + range;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -2296,14 +2366,80 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
- op->prefetch.region = prefetch_region;
- }
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+ u32 i;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
+ op->prefetch.region = prefetch_region;
+ break;
+ }
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
+
+ for_each_tile(tile, vm->xe, id)
+ tile_mask |= 0x1 << id;
+
+ xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
+ op->prefetch_range.region = prefetch_region;
+ op->prefetch_range.ranges_count = 0;
+alloc_next_range:
+ svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
+
+ if (PTR_ERR(svm_range) == -ENOENT) {
+ u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma);
+
+ addr = ret == ULONG_MAX ? 0 : ret;
+ if (addr)
+ goto alloc_next_range;
+ else
+ goto print_op_label;
+ }
+
+ if (IS_ERR(svm_range)) {
+ err = PTR_ERR(svm_range);
+ goto unwind_prefetch_ops;
+ }
+
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
+ goto check_next_range;
+ }
+
+ err = xa_alloc(&op->prefetch_range.range,
+ &i, svm_range, xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ goto unwind_prefetch_ops;
+
+ op->prefetch_range.ranges_count++;
+ vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED");
+check_next_range:
+ if (range_end > xe_svm_range_end(svm_range) &&
+ xe_svm_range_end(svm_range) < xe_vma_end(vma)) {
+ addr = xe_svm_range_end(svm_range);
+ goto alloc_next_range;
+ }
+ }
+print_op_label:
print_op(vm->xe, __op);
}
return ops;
+
+unwind_prefetch_ops:
+ xe_svm_prefetch_gpuva_ops_fini(ops);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return ERR_PTR(err);
}
+
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
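The goto-based loop above walks the prefetch window in SVM-range-sized steps. In outline, under the same assumptions as the code (a CPU-address-mirror VMA and [addr, range_end) as the window):

/*
 * Sketch of the range walk above:
 *
 *   while (addr < range_end) {
 *       range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
 *       if (range == ERR_PTR(-ENOENT)) {      // hole: jump to next VMA start
 *           addr = xe_svm_find_vma_start(vm, addr, range_end, vma);
 *           if (addr == ULONG_MAX) break;
 *           continue;
 *       }
 *       if (!already_valid)                   // xe_svm_range_validate()
 *           xa_alloc(&op->prefetch_range.range, ..., range, ...);
 *       addr = xe_svm_range_end(range);       // also bounded by the VMA end
 *   }
 */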
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
@@ -2498,7 +2634,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
!op->map.is_cpu_addr_mirror) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
- op->tile_mask);
+ op->tile_mask, 1);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2507,6 +2643,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
gpuva_to_vma(op->base.remap.unmap->va);
bool skip = xe_vma_is_cpu_addr_mirror(old);
u64 start = xe_vma_start(old), end = xe_vma_end(old);
+ int num_remap_ops = 0;
if (op->base.remap.prev)
start = op->base.remap.prev->va.addr +
@@ -2559,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
@@ -2588,11 +2725,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
if (!skip)
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
+
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2604,7 +2743,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return -EBUSY;
if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
break;
case DRM_GPUVA_OP_PREFETCH:
vma = gpuva_to_vma(op->base.prefetch.va);
@@ -2615,8 +2754,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return err;
}
- if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
+ op->prefetch_range.ranges_count);
+ else
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
+
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2742,6 +2885,57 @@ static int check_ufence(struct xe_vma *vma)
return 0;
}
+static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ int err = 0;
+
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ unsigned long i;
+ u32 region;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ return 0;
+
+ region = op->prefetch_range.region;
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = devmem_possible;
+ ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+
+ /* TODO: Thread the migration */
+ xa_for_each(&op->prefetch_range.range, i, svm_range) {
+ if (!region)
+ xe_svm_range_migrate_to_smem(vm, svm_range);
+
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
+ tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ err = xe_svm_alloc_vram(tile, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ return -ENODATA;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
+ }
+
+ err = xe_svm_range_get_pages(vm, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
+ err = -ENODATA;
+ return err;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
+ }
+
+ return err;
+}
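Both failure legs are collapsed into -ENODATA, which tells userspace the prefetch failed transiently and the bind should be retried. A hypothetical userspace retry loop (illustrative only; setup and other error handling elided):

/* Hypothetical userspace sketch: retry while the kernel reports
 * transient prefetch failure as ENODATA. */
int ret;

do {
	ret = ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
} while (ret == -1 && errno == ENODATA);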
+
static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma_op *op)
{
@@ -2779,7 +2973,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_PREFETCH:
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- u32 region = op->prefetch.region;
+ u32 region;
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ region = op->prefetch_range.region;
+ else
+ region = op->prefetch.region;
xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
@@ -2798,6 +2997,25 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
return err;
}
+static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+ int err;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return 0;
+
+ list_for_each_entry(op, &vops->list, link) {
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ err = prefetch_ranges(vm, op);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
struct xe_vm *vm,
struct xe_vma_ops *vops)
@@ -3239,6 +3457,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
vops->q = q;
vops->syncs = syncs;
vops->num_syncs = num_syncs;
+ vops->flags = 0;
}
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
@@ -3247,9 +3466,9 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
{
u16 coh_mode;
- if (XE_IOCTL_DBG(xe, range > bo->size) ||
+ if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) ||
XE_IOCTL_DBG(xe, obj_offset >
- bo->size - range)) {
+ xe_bo_size(bo) - range)) {
return -EINVAL;
}
@@ -3446,7 +3665,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
u16 pat_index = bind_ops[i].pat_index;
- ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
+ ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset,
addr, range, op, flags,
prefetch_region, pat_index);
if (IS_ERR(ops[i])) {
@@ -3479,6 +3698,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
goto unwind_ops;
+ err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
+ if (err)
+ goto unwind_ops;
+
fence = vm_bind_ioctl_ops_execute(vm, &vops);
if (IS_ERR(fence))
err = PTR_ERR(fence);
@@ -3548,7 +3771,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
xe_vma_ops_init(&vops, vm, q, NULL, 0);
- ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
+ ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo),
DRM_XE_VM_BIND_OP_MAP, 0, 0,
vm->xe->pat.idx[cache_lvl]);
if (IS_ERR(ops)) {
@@ -3620,6 +3843,68 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
+ * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * address range
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ * @tile_mask: mask of tiles whose GTs should issue the TLB invalidation
+ *
+ * Issue a range-based TLB invalidation for the GTs in @tile_mask.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
+{
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tile *tile;
+ u32 fence_id = 0;
+ u8 id;
+ int err;
+
+ if (!tile_mask)
+ return 0;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+ return err;
+}
+
+/**
* xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
* @vma: VMA to invalidate
*
@@ -3632,28 +3917,34 @@ void xe_vm_unlock(struct xe_vm *vm)
int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
- u8 id;
- u32 fence_id = 0;
+ u8 tile_mask = 0;
int ret = 0;
+ u8 id;
xe_assert(xe, !xe_vma_is_null(vma));
xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ vm_dbg(&vm->xe->drm,
"INVALIDATE: addr=0x%016llx, range=0x%016llx",
xe_vma_start(vma), xe_vma_size(vma));
- /* Check that we don't race with page-table updates */
+ /*
+ * Check that we don't race with page-table updates; the
+ * tile_invalidated update is safe
+ */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
+ lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
+
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
to_userptr_vma(vma)->userptr.notifier_seq));
- WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
} else {
@@ -3661,39 +3952,17 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- for_each_tile(tile, xe, id) {
- if (xe_pt_zap_ptes(tile, vma)) {
- xe_device_wmb(xe);
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id],
- true);
-
- ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id],
- true);
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes(tile, vma))
+ tile_mask |= BIT(id);
- ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
- }
- }
+ xe_device_wmb(xe);
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
- vma->tile_invalidated = vma->tile_mask;
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 494af6bdc646..3475a118f666 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -169,6 +169,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
!xe_vma_is_cpu_addr_mirror(vma);
}
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -226,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
+
int xe_vm_invalidate_vma(struct xe_vma *vma);
int xe_vm_validate_protected(struct xe_vm *vm);
@@ -370,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
return false;
}
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
+ * a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
+ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
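A sketch of the intended opportunistic use, e.g. an early-out in a fault or invalidation path (illustrative):

/* Opportunistic check only -- see the locking caveats above. */
if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
				vma->tile_invalidated))
	return 0;	/* mapping already valid, nothing to do */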
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 1979e9bdbdf3..bed6088e1bb3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -100,14 +100,21 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @tile_invalidated: VMA has been invalidated */
+ /**
+ * @tile_invalidated: Tile mask of bindings that are invalidated for this VMA.
+ * Protected by the BO's resv and, for userptrs, by vm->userptr.notifier_lock
+ * in write mode for writing, or by vm->userptr.notifier_lock in read mode
+ * plus the vm->resv. For a stable read, the BO's resv or, for userptrs,
+ * vm->userptr.notifier_lock in read mode is required. Can be
+ * opportunistically read with READ_ONCE outside of locks.
+ */
u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
/**
- * @tile_present: GT mask of binding are present for this VMA.
+ * @tile_present: Tile mask of bindings that are present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
* vm->userptr.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
@@ -382,6 +389,16 @@ struct xe_vma_op_unmap_range {
struct xe_svm_range *range;
};
+/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */
+struct xe_vma_op_prefetch_range {
+ /** @range: xarray for SVM ranges data */
+ struct xarray range;
+ /** @ranges_count: number of svm ranges to map */
+ u32 ranges_count;
+ /** @region: memory region to prefetch to */
+ u32 region;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -424,6 +441,8 @@ struct xe_vma_op {
struct xe_vma_op_map_range map_range;
/** @unmap_range: VMA unmap range operation specific data */
struct xe_vma_op_unmap_range unmap_range;
+ /** @prefetch_range: VMA prefetch range operation specific data */
+ struct xe_vma_op_prefetch_range prefetch_range;
};
};
@@ -441,6 +460,9 @@ struct xe_vma_ops {
u32 num_syncs;
/** @pt_update_ops: page table update operations */
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+ /** @flags: properties of this set of VMA operations */
+#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+ u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
bool inject_error;
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
index 56930ad42962..8f23a27871b6 100644
--- a/drivers/gpu/drm/xe/xe_vsec.c
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -141,8 +141,8 @@ static int xe_guid_decode(u32 guid, int *index, u32 *offset)
return 0;
}
-static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
- u32 count)
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
{
struct xe_device *xe = pdev_to_xe_device(pdev);
void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
index 5777c53faec2..dabfb4e02d70 100644
--- a/drivers/gpu/drm/xe/xe_vsec.h
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -4,8 +4,12 @@
#ifndef _XE_VSEC_H_
#define _XE_VSEC_H_
+#include <linux/types.h>
+
+struct pci_dev;
struct xe_device;
void xe_vsec_init(struct xe_device *xe);
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, u32 count);
#endif
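With the prototype exported, code outside xe_vsec.c can read BMG PMT telemetry directly. A hypothetical caller; the GUID value and the return convention (negative on error) are assumptions here:

/* Hypothetical usage sketch; 0x1e2f8f0d is a placeholder GUID. */
u64 sample;
int ret = xe_pmt_telem_read(pdev, 0x1e2f8f0d, &sample, 0, 1);

if (ret < 0)
	return ret;	/* telemetry unavailable or GUID unknown */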
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 67196baa4249..22a98600fd8f 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -10,6 +10,7 @@
#include <linux/compiler_types.h>
#include <linux/fault-inject.h>
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
#include "regs/xe_engine_regs.h"
@@ -285,6 +286,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ { XE_RTP_NAME("16021865536"),
+ XE_RTP_RULES(MEDIA_VERSION(3002),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(3002),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
{ XE_RTP_NAME("14021486841"),
XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
ENGINE_CLASS(VIDEO_DECODE)),
@@ -503,10 +516,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
- { XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
- },
/*
* These two workarounds are the same, just applying to different
* engines. Although Wa_18032095049 (for the RCS) isn't required on
@@ -533,31 +542,38 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe2_HPG */
{ XE_RTP_NAME("16018712365"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
},
{ XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
},
{ XE_RTP_NAME("14019988906"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
},
{ XE_RTP_NAME("14019877138"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
},
{ XE_RTP_NAME("14020338487"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
},
{ XE_RTP_NAME("18032247524"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
},
{ XE_RTP_NAME("14018471104"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
/*
@@ -566,7 +582,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
* apply this to all engines for simplicity.
*/
{ XE_RTP_NAME("16021639441"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002)),
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
GHWSP_CSB_REPORT_DIS |
PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
@@ -578,11 +594,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS))
},
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
- { XE_RTP_NAME("14021821874"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ { XE_RTP_NAME("14021821874, 14022954250"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
@@ -640,6 +657,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -774,7 +795,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
},
{ XE_RTP_NAME("18033852989"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
},
{ XE_RTP_NAME("14021567978"),
@@ -807,7 +828,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN))
},
{ XE_RTP_NAME("14019386621"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
},
{ XE_RTP_NAME("14020756599"),
@@ -824,13 +845,17 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_AUTOSTRIP))
},
{ XE_RTP_NAME("15016589081"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
{ XE_RTP_NAME("22021007897"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
+ { XE_RTP_NAME("18033852989"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
@@ -852,9 +877,34 @@ static __maybe_unused const struct xe_rtp_entry oob_was[] = {
static_assert(ARRAY_SIZE(oob_was) - 1 == _XE_WA_OOB_COUNT);
+static __maybe_unused const struct xe_rtp_entry device_oob_was[] = {
+#include <generated/xe_device_wa_oob.c>
+ {}
+};
+
+static_assert(ARRAY_SIZE(device_oob_was) - 1 == _XE_DEVICE_WA_OOB_COUNT);
+
__diag_pop();
/**
+ * xe_wa_process_device_oob - process OOB workaround table
+ * @xe: device instance to process workarounds for
+ *
+ * Process the OOB workaround table for this device, marking in @xe the
+ * workarounds that are active.
+ */
+void xe_wa_process_device_oob(struct xe_device *xe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(xe);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was));
+
+ xe->wa_active.oob_initialized = true;
+ xe_rtp_process(&ctx, device_oob_was);
+}
+
+/**
* xe_wa_process_oob - process OOB workaround table
* @gt: GT instance to process workarounds for
*
@@ -923,6 +973,28 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
}
/**
+ * xe_wa_device_init - initialize device with workaround oob bookkeeping
+ * @xe: Xe device instance to initialize
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_wa_device_init(struct xe_device *xe)
+{
+ unsigned long *p;
+
+ p = drmm_kzalloc(&xe->drm,
+ sizeof(*p) * BITS_TO_LONGS(ARRAY_SIZE(device_oob_was)),
+ GFP_KERNEL);
+
+ if (!p)
+ return -ENOMEM;
+
+ xe->wa_active.oob = p;
+
+ return 0;
+}
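The expected probe-time ordering, sketched below: the bitmap must be allocated before the table is processed, and XE_DEVICE_WA() asserts oob_initialized, so queries are only valid after processing.

/* Probe-time ordering sketch (simplified): */
err = xe_wa_device_init(xe);	/* allocate the device OOB bitmap */
if (err)
	return err;

xe_wa_process_device_oob(xe);	/* mark active device workarounds */

/* XE_DEVICE_WA(xe, ...) may be used from this point on */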
+
+/**
* xe_wa_init - initialize gt with workaround bookkeeping
* @gt: GT instance to initialize
*
@@ -956,6 +1028,16 @@ int xe_wa_init(struct xe_gt *gt)
}
ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
+void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "Device OOB Workarounds\n");
+ for_each_set_bit(idx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was))
+ if (device_oob_was[idx].name)
+ drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name);
+}
+
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index 52337405b5bc..f3880c65cb8d 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -13,17 +13,19 @@ struct xe_gt;
struct xe_hw_engine;
struct xe_tile;
+int xe_wa_device_init(struct xe_device *xe);
int xe_wa_init(struct xe_gt *gt);
+void xe_wa_process_device_oob(struct xe_device *xe);
void xe_wa_process_oob(struct xe_gt *gt);
void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
+void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, that don't fit the lifecycle any
- * other more specific type
+ * XE_WA - Out-of-band workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
@@ -32,4 +34,20 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
+/**
+ * XE_DEVICE_WA - Out-of-band Device workarounds, to be queried and called
+ * as needed.
+ * @xe__: xe_device
+ * @id__: XE_DEVICE_WA_OOB_<id__>, as generated by build system in generated/xe_device_wa_oob.h
+ */
+#define XE_DEVICE_WA(xe__, id__) ({ \
+ xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
+ test_bit(XE_DEVICE_WA_OOB_ ## id__, (xe__)->wa_active.oob); \
+})
+
+#define XE_DEVICE_WA_DISABLE(xe__, id__) ({ \
+ xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
+ clear_bit(XE_DEVICE_WA_OOB_ ## id__, (xe__)->wa_active.oob); \
+})
+
#endif
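A usage sketch of the two device-scope macros; the id FOO, the firmware check, and the apply helper are placeholders, purely for illustration (real ids come from generated/xe_device_wa_oob.h):

/* Hypothetical sketch; FOO stands for a generated device WA id. */
if (XE_DEVICE_WA(xe, FOO)) {
	if (firmware_handles_foo(xe))		/* hypothetical check */
		XE_DEVICE_WA_DISABLE(xe, FOO);	/* later queries see it inactive */
	else
		apply_foo_workaround(xe);	/* hypothetical application */
}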
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 6d70109fcc43..e990f20eccfe 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -31,12 +31,14 @@
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
GRAPHICS_VERSION(3001)
-14022293748 GRAPHICS_VERSION(2001)
+14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
-22019794406 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(3003)
+22019794406 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
+ GRAPHICS_VERSION(3003)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
@@ -58,9 +60,15 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
+ MEDIA_VERSION(3002)
+ GRAPHICS_VERSION(3003)
16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
MEDIA_VERSION_RANGE(1300, 3000)
+ MEDIA_VERSION(3002)
+ GRAPHICS_VERSION(3003)
# SoC workaround - currently applies to all platforms with the following
# primary GT GMDID
14022085890 GRAPHICS_VERSION(2001)
+
+15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index dfa78a49a6d9..806ec66ee7f7 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -54,6 +54,7 @@ static const struct drm_framebuffer_funcs fb_funcs = {
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xen_drm_front_drm_info *drm_info = dev->dev_private;
@@ -61,7 +62,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
struct drm_gem_object *gem_obj;
int ret;
- fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+ fb = drm_gem_fb_create_with_funcs(dev, filp, info, mode_cmd, &fb_funcs);
if (IS_ERR(fb))
return fb;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 197defe4f928..34ddbf98e81d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1720,7 +1720,8 @@ disconnected:
return connector_status_disconnected;
}
-static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+zynqmp_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -2409,9 +2410,9 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
struct zynqmp_dp *dp;
int ret;
- dp = kzalloc(sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(&pdev->dev, struct zynqmp_dp, bridge, &zynqmp_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
@@ -2424,31 +2425,25 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Acquire all resources (IOMEM, IRQ and PHYs). */
dp->iomem = devm_platform_ioremap_resource_byname(pdev, "dp");
- if (IS_ERR(dp->iomem)) {
- ret = PTR_ERR(dp->iomem);
- goto err_free;
- }
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
dp->irq = platform_get_irq(pdev, 0);
- if (dp->irq < 0) {
- ret = dp->irq;
- goto err_free;
- }
+ if (dp->irq < 0)
+ return dp->irq;
dp->reset = devm_reset_control_get(dp->dev, NULL);
- if (IS_ERR(dp->reset)) {
- ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ if (IS_ERR(dp->reset))
+ return dev_err_probe(dp->dev, PTR_ERR(dp->reset),
"failed to get reset\n");
- goto err_free;
- }
ret = zynqmp_dp_reset(dp, true);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_phy_probe(dp);
if (ret)
@@ -2456,7 +2451,6 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Initialize the bridge. */
bridge = &dp->bridge;
- bridge->funcs = &zynqmp_dp_bridge_funcs;
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -2509,8 +2503,6 @@ err_phy_exit:
zynqmp_dp_phy_exit(dp);
err_reset:
zynqmp_dp_reset(dp, true);
-err_free:
- kfree(dp);
return ret;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 3a9544b97bc5..2764c4b17c5e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -180,7 +180,6 @@ static int zynqmp_dpsub_parse_dt(struct zynqmp_dpsub *dpsub)
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub)
{
kfree(dpsub->disp);
- kfree(dpsub->dp);
kfree(dpsub);
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index b47463473472..2bee0a2275ed 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -373,6 +373,7 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
static struct drm_framebuffer *
zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
@@ -383,7 +384,7 @@ zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
- return drm_gem_fb_create(drm, file_priv, &cmd);
+ return drm_gem_fb_create(drm, file_priv, info, &cmd);
}
static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {