Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 430
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 262
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 222
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 197
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 388
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 422
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 147
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 640
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 174
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 347
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sid.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v1_0.c | 839
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v1_0.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 506
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 768
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c | 209
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 95
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 286
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 1123
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 2853
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 129
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 581
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c | 123
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 85
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile | 141
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/Makefile | 140
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h) | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h) | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h) | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h) | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c) | 123
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h) | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c) | 477
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h) | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dsc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h | 136
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c | 147
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 144
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 1412
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 1446
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 121
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 130
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link_service.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 173
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 143
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c | 131
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 719
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/include/bios_parser_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/include/dpcd_defs.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_id.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/include/signal_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 122
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 148
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/amd_cper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h | 10
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 125
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v12_api_def.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 40
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 37
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 33
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h | 557
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 101
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 249
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 326
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 165
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 67
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1
-rw-r--r--  drivers/gpu/drm/amd/ras/Makefile | 34
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/Makefile | 33
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c | 285
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h | 54
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c | 182
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h | 27
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c | 648
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h | 83
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c | 94
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h | 30
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c | 125
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h | 30
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c | 190
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h | 41
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c | 279
-rw-r--r--  drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h | 110
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/Makefile | 44
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras.h | 370
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_aca.c | 672
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_aca.h | 164
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c | 379
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h | 71
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_cmd.c | 522
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_cmd.h | 426
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_core.c | 603
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_cper.c | 315
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_cper.h | 304
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c | 1339
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h | 197
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_gfx.c | 70
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_gfx.h | 43
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c | 426
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h | 259
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c | 317
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h | 93
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_mp1.c | 81
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_mp1.h | 50
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c | 105
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h | 30
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_nbio.c | 96
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_nbio.h | 46
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c | 123
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h | 31
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_process.c | 322
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_process.h | 53
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_psp.c | 750
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_psp.h | 145
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c | 46
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h | 231
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_umc.c | 707
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_umc.h | 166
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c | 511
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h | 314
505 files changed, 32772 insertions, 4197 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1acfed2f92ef..7f515be5185d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -43,14 +43,16 @@ config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
help
- Choose this option if you want to enable experimental support
+ Choose this option if you want to enable support
for SI (Southern Islands) asics.
- SI is already supported in radeon. Experimental support for SI
- in amdgpu will be disabled by default and is still provided by
- radeon. Use module options to override this:
+ SI (Southern Islands) are first generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default, SI dedicated GPUs are supported by amdgpu.
- radeon.si_support=0 amdgpu.si_support=1
+ Use module options to override this:
+ To use radeon for SI,
+ radeon.si_support=1 amdgpu.si_support=0
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
@@ -59,11 +61,17 @@ config DRM_AMDGPU_CIK
Choose this option if you want to enable support for CIK (Sea
Islands) asics.
- CIK is already supported in radeon. Support for CIK in amdgpu
- will be disabled by default and is still provided by radeon.
- Use module options to override this:
+ CIK (Sea Islands) are second generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default,
+ CIK dedicated GPUs are supported by amdgpu
+ CIK APUs are supported by radeon
+ Use module options to override this:
+ To use amdgpu for CIK,
radeon.cik_support=0 amdgpu.cik_support=1
+ To use radeon for CIK,
+ radeon.cik_support=1 amdgpu.cik_support=0
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 64e7acff8f18..c88760fb52ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
- -I$(FULL_AMD_PATH)/amdkfd
+ -I$(FULL_AMD_PATH)/amdkfd \
+ -I$(FULL_AMD_PATH)/ras/ras_mgr
# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
@@ -77,7 +78,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
- uvd_v3_1.o
+ uvd_v3_1.o vce_v1_0.o
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
@@ -324,4 +325,9 @@ amdgpu-y += \
isp_v4_1_1.o
endif
+AMD_GPU_RAS_PATH := ../ras
+AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras
+include $(AMD_GPU_RAS_FULL_PATH)/Makefile
+amdgpu-y += $(AMD_GPU_RAS_FILES)
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 9569dc16dd3d..daa7b23bc775 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -88,6 +88,10 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
uint32_t ip_block;
int r, i;
+ /* Skip suspend of SDMA IP versions >= 4.4.2. They are multi-aid */
+ if (adev->aid_mask)
+ ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2a0df4cabb99..9f9774f58ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -372,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
-#define AMDGPU_MAX_IP_NUM 16
+#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM
struct amdgpu_ip_block_status {
bool valid;
@@ -839,8 +841,6 @@ struct amd_powerplay {
const struct amd_pm_funcs *pp_funcs;
};
-struct ip_discovery_top;
-
/* polaris10 kickers */
#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
((rid == 0xE3) || \
@@ -972,8 +972,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct debugfs_blob_wrapper debugfs_vbios_blob;
- struct debugfs_blob_wrapper debugfs_discovery_blob;
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1063,6 +1062,9 @@ struct amdgpu_device {
u32 log2_max_MBps;
} mm_stats;
+ /* discovery*/
+ struct amdgpu_discovery_info discovery;
+
/* display */
bool enable_virtual_display;
struct amdgpu_vkms_output *amdgpu_vkms_output;
@@ -1174,6 +1176,12 @@ struct amdgpu_device {
* queue fence.
*/
struct xarray userq_xa;
+ /**
+ * @userq_doorbell_xa: Global user queue map (doorbell index → queue)
+ * Key: doorbell_index (unique global identifier for the queue)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_doorbell_xa;
/* df */
struct amdgpu_df df;
@@ -1265,8 +1273,6 @@ struct amdgpu_device {
struct list_head ras_list;
- struct ip_discovery_top *ip_top;
-
struct amdgpu_reset_domain *reset_domain;
struct mutex benchmark_mutex;
@@ -1290,6 +1296,7 @@ struct amdgpu_device {
bool debug_disable_gpu_ring_reset;
bool debug_vm_userptr;
bool debug_disable_ce_logs;
+ bool debug_enable_ce_cs;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
@@ -1308,9 +1315,8 @@ struct amdgpu_device {
*/
bool apu_prefer_gtt;
- struct list_head userq_mgr_list;
- struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
+ struct work_struct userq_reset_work;
struct amdgpu_uid *uid_info;
/* KFD
@@ -1534,11 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) \
- ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
-#define amdgpu_asic_invalidate_hdp(adev, r) \
- ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
- ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1637,7 +1638,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 4926996f94da..381ef205b0df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 3;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL);
if (r)
goto failure;
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
@@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[2].name = "designware-i2s";
+ adev->acp.acp_cell[2].id = 2;
adev->acp.acp_cell[2].num_resources = 1;
adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].id = 3;
adev->acp.acp_cell[3].num_resources = 1;
adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL);
if (r)
goto failure;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 6c62e27b9800..d31460a9e958 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev_to_drm(adev));
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 9e120c934cc1..8bdfcde2029b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -71,7 +71,7 @@ struct kgd_mem {
struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct list_head validate_list;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 83020963dfde..b1c24c8fa686 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1057,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ ret = -ENOMEM;
+ goto unregister_out;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
+ amdgpu_hmm_range_free(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
@@ -1113,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ amdgpu_hmm_range_free(range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
@@ -1267,6 +1274,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ /* VM entity stopped if process killed, don't clear freed pt bo */
+ if (!amdgpu_vm_ready(vm))
+ return 0;
+
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
@@ -1916,7 +1927,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
- amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mutex_unlock(&process_info->notifier_lock);
}
@@ -1954,9 +1965,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
*/
if (size) {
if (!is_imported &&
- (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- (adev->apu_prefer_gtt &&
- mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
*size = bo_size;
else
*size = 0;
@@ -2329,10 +2338,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb(); /* make sure read happened */
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
@@ -2543,7 +2551,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
- amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
@@ -2567,9 +2575,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
}
+ mem->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!mem->range))
+ return -ENOMEM;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
+ amdgpu_hmm_range_free(mem->range);
+ mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
/* Return -EFAULT bad address error as success. It will
@@ -2742,8 +2755,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
continue;
/* Only check mem with hmm range associated */
- valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ valid = amdgpu_hmm_range_valid(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
if (!valid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index c7d32fb216e4..636385c80f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
u8 frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
- if (frev == 2 && crev == 1) {
- fw_usage_v2_1 =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_1(adev,
- fw_usage_v2_1,
- &usage_bytes);
- } else if (frev >= 2 && crev >= 2) {
- fw_usage_v2_2 =
- (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_2(adev,
- fw_usage_v2_2,
- &usage_bytes);
+ /* Skip atomfirmware allocation for SRIOV VFs when dynamic crit regn is enabled */
+ if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 00e96419fcda..35d04e69aec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
- * For SR-IOV, the vbios image is also put in VRAM in the VF.
+ * For SR-IOV, if dynamic critical region is not enabled,
+ * the vbios image is also put at the start of VRAM in the VF.
*/
static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios = NULL;
resource_size_t vram_base;
- resource_size_t size = 256 * 1024; /* ??? */
+ u32 size = 256U * 1024U; /* ??? */
if (!(adev->flags & AMD_IS_APU))
if (amdgpu_device_need_post(adev))
@@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap_wc(vram_base, size);
- if (!bios)
- return false;
adev->bios = kmalloc(size, GFP_KERNEL);
- if (!adev->bios) {
- iounmap(bios);
+ if (!adev->bios)
return false;
+
+ /* For SRIOV with dynamic critical region is enabled,
+ * the vbios image is put at a dynamic offset of VRAM in the VF.
+ * If dynamic critical region is disabled, follow the existing logic as on baremetal.
+ */
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+ } else {
+ bios = ioremap_wc(vram_base, size);
+ if (!bios) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+
+ memcpy_fromio(adev->bios, bios, size);
+ iounmap(bios);
}
+
adev->bios_size = size;
- memcpy_fromio(adev->bios, bios, size);
- iounmap(bios);
if (!check_atom_bios(adev, size)) {
amdgpu_bios_release(adev);
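
Editor's note: the two call sites in this series (the vbios read above and the IP
discovery read further down) imply the following contract for the new virt helper.
The prototype below is a sketch inferred from those uses, not the authoritative
declaration:

	/* Sketch, inferred from the call sites in this diff:
	 * @table_id selects the blob (e.g. AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID),
	 * @buf receives the data, @size is in/out (buffer size in, bytes read out).
	 * Returns 0 on success, nonzero on failure.
	 */
	int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
					      uint32_t table_id, void *buf,
					      uint32_t *size);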
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index a716c9886c74..2b5e7c46a39d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,7 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
bool user_invalidated;
};
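
Editor's note: the amdgpu_cs.c hunks below dereference e->range->hmm_range.hmm_pfns,
so the new type presumably embeds the core struct hmm_range. A hypothetical minimal
layout, for orientation only; only the embedded member is confirmed by this diff:

	/* Hypothetical sketch of the wrapper type. */
	struct amdgpu_hmm_range {
		struct hmm_range hmm_range;	/* core HMM range, holds hmm_pfns[] */
		/* likely also bookkeeping used by amdgpu_hmm_range_valid()/_free() */
	};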
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 47e9bfba0642..9f96d568acf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -734,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -919,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1146,10 +1142,8 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1486,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
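
Editor's note: these hunks (and the amdgpu_debugfs.c ones below) drop
pm_runtime_mark_last_busy() immediately before pm_runtime_put_autosuspend(). That
only stays correct if put_autosuspend now refreshes the last-busy timestamp itself,
which these hunks appear to assume; the caller pattern collapses from two calls to one:

	/* Before: callers refreshed the autosuspend timer by hand. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	/* After: the put is assumed to mark last-busy internally. */
	pm_runtime_put_autosuspend(dev);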
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index ef996493115f..425a3e564360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
index bcb97d245673..353421807387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9cd7741d2254..ecdfe6cb36cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -29,7 +29,6 @@
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
-#include <linux/hmm.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -41,6 +40,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
struct amdgpu_device *adev,
@@ -364,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
+ if (!p->adev->debug_enable_ce_cs &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
+ dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
+ return -EINVAL;
+ }
+
if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
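
Editor's note: per the error message above, the new CE CS rejection can presumably be
bypassed for testing by loading the driver with amdgpu.debug=0x400, which would set
adev->debug_enable_ce_cs; treat the exact bit value as an assumption read off that
message rather than a documented flag.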
@@ -702,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps) {
+ if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
*max_bytes = 0;
*max_vis_bytes = 0;
return;
@@ -885,12 +891,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ e->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!e->range))
+ return -ENOMEM;
+
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
if (r)
goto out_free_user_pages;
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
+ if (bo->tbo.ttm->pages[i] !=
+ hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -984,9 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = e->bo;
-
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1317,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
- e->range);
+ r |= !amdgpu_hmm_range_valid(e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
if (r) {
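
Editor's note: taken together, the hunks in this file show the intended lifecycle of
the new HMM helpers. A condensed caller sketch, assuming the signatures used above
(bo is the userptr BO being validated):

	/* Condensed caller sketch based on the amdgpu_cs.c hunks above. */
	struct amdgpu_hmm_range *range;
	bool valid;
	int r;

	range = amdgpu_hmm_range_alloc(NULL);		/* allocate tracking state */
	if (!range)
		return -ENOMEM;

	r = amdgpu_ttm_tt_get_user_pages(bo, range);	/* populate user pages */
	if (r) {
		amdgpu_hmm_range_free(range);
		return r;
	}

	valid = amdgpu_hmm_range_valid(range);		/* still valid at submit? */
	amdgpu_hmm_range_free(range);			/* always freed after check */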
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f5d5c45ddc0d..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
- goto cleanup_entity;
+ goto error_free_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a70651050acf..62d43b8cbe58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
@@ -179,7 +178,6 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
if (rd->id.use_grbm) {
if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
(rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
mutex_unlock(&rd->lock);
@@ -310,7 +307,6 @@ end:
mutex_unlock(&rd->lock);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
@@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
@@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
up_write(&adev->reset_domain->sem);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
r = amdgpu_benchmark(adev, val);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
@@ -1902,7 +1878,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence.base) == fence)
+ if (preempted && (&job->hw_fence->base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
ret = -EINVAL;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
@@ -2123,10 +2098,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
- adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
- adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
- debugfs_create_blob("amdgpu_discovery", 0444, root,
- &adev->debugfs_discovery_blob);
+ if (adev->discovery.debugfs_blob.size)
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->discovery.debugfs_blob);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 8a026bc9ea44..4e2fe6674db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -217,8 +217,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
- coredump->reset_time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &coredump->reset_time);
if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7a899fb4de29..a1817b4b5173 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,7 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
@@ -179,6 +180,10 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
+
static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
@@ -1882,6 +1887,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
{
+	/* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
+ * It's unclear if this is a platform-specific or GPU-specific issue.
+ * Disable ASPM on SI for the time being.
+ */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return true;
+
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
@@ -2380,7 +2392,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_valid - is the hardware IP enabled
+ * amdgpu_device_ip_is_hw - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
@@ -2388,6 +2400,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
 * Check if the hardware IP is enabled or not.
 * Returns true if the IP is enabled, false if not.
*/
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].status.hw;
+ }
+ return false;
+}
+
+/**
+ * amdgpu_device_ip_is_valid - is the hardware IP valid
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is valid or not.
+ * Returns true if the IP is valid, false if not.
+ */
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
@@ -2466,6 +2499,7 @@ static const char *ip_block_names[] = {
[AMD_IP_BLOCK_TYPE_VPE] = "vpe",
[AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
[AMD_IP_BLOCK_TYPE_ISP] = "isp",
+ [AMD_IP_BLOCK_TYPE_RAS] = "ras",
};
static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
@@ -2626,11 +2660,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
return 0;
chip_name = "navi12";
break;
case CHIP_CYAN_SKILLFISH:
+ if (adev->discovery.bin)
+ return 0;
chip_name = "cyan_skillfish";
break;
}
@@ -2754,6 +2790,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
+
+ r = amdgpu_virt_init_critical_region(adev);
+ if (r)
+ return r;
}
switch (adev->asic_type) {
@@ -3407,10 +3447,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD, it's handled specially */
+ /* skip CG for VCE/UVD/VPE, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@@ -3642,6 +3683,20 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
"failed to release exclusive mode on fini\n");
}
+ /*
+ * Driver reload on the APU can fail due to firmware validation because
+ * the PSP is always running, as it is shared across the whole SoC.
+ * This same issue does not occur on dGPU because it has a mechanism
+ * that checks whether the PSP is running. A solution for those issues
+ * in the APU is to trigger a GPU reset, but this should be done during
+ * the unload phase to avoid adding boot latency and screen flicker.
+ */
+ if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
+ r = amdgpu_asic_reset(adev);
+ if (r)
+ dev_err(adev->dev, "asic reset on %s failed\n", __func__);
+ }
+
return 0;
}
@@ -3752,7 +3807,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
*/
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
@@ -3773,13 +3828,25 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
if (r)
- return r;
+ goto unwind;
}
return 0;
+unwind:
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec)
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
+ rec);
+
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+
+ return r;
}
/**
@@ -3795,7 +3862,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
if (adev->in_s0ix)
amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
@@ -3856,9 +3923,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
- adev->ip_blocks[i].status.hw = false;
+ if (r)
+ goto unwind;
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
@@ -3868,13 +3935,40 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
dev_err(adev->dev,
"SMC failed to set mp1 state %d, %d\n",
adev->mp1_state, r);
- return r;
+ goto unwind;
}
}
}
}
return 0;
+unwind:
+ /* suspend phase 2 = resume phase 1 + resume phase 2 */
+ rec = amdgpu_device_ip_resume_phase1(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_fw_loading(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_fw_loading failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_ip_resume_phase2(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ return r;
}
/**
@@ -3888,7 +3982,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
@@ -4172,25 +4266,13 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
- /*
- * We have systems in the wild with these ASICs that require
- * LVDS and VGA support which is not supported with DC.
- *
- * Fallback to the non-DC driver here by default so as not to
- * cause regressions.
- */
-#if defined(CONFIG_DRM_AMD_DC_SI)
- return amdgpu_dc > 0;
-#else
- return false;
-#endif
- case CHIP_BONAIRE:
+ return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
/*
* We have systems in the wild with these ASICs that require
- * VGA support which is not supported with DC.
+	 * TRAVIS and NUTMEG bridges, which are not supported with DC.
*
* Fallback to the non-DC driver here by default so as not to
* cause regressions.
@@ -4278,58 +4360,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
long timeout;
int ret = 0;
- /*
- * By default timeout for jobs is 10 sec
- */
- adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ /* By default timeout for all queues is 2 sec */
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = msecs_to_jiffies(2000);
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
+ if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
+ return 0;
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- dev_warn(adev->dev, "lockup timeout disabled");
- add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ } else {
+ timeout = msecs_to_jiffies(timeout);
}
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1) {
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
}
}
+	/* When only one value is specified, apply it to all queues. */
+ if (index == 1)
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = timeout;
+
return ret;
}
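
Editor's note: besides the restructuring, this hunk changes semantics in two ways
worth flagging. The built-in default drops from 10 s to 2 s, and a single value
(e.g. amdgpu.lockup_timeout=10000) now applies to all four queues unconditionally,
whereas the removed code kept compute at its own default unless running under
SR-IOV or passthrough. The parse order remains gfx, compute, sdma, video.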
@@ -4384,6 +4461,55 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
dev_info(adev->dev, "MCBP is enabled\n");
}
+static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
+
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r) {
+ adev->ucode_sysfs_en = false;
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
+
+ r = amdgpu_device_attr_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
+
+ r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
+ if (r)
+ dev_err(adev->dev,
+ "Could not create amdgpu board attributes\n");
+
+ amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
+
+ return r;
+}
+
+static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.sysfs_initialized)
+ amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ amdgpu_device_attr_sysfs_fini(adev);
+ amdgpu_fru_sysfs_fini(adev);
+
+ amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -4483,7 +4609,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
- mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4511,7 +4636,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
- INIT_LIST_HEAD(&adev->userq_mgr_list);
+ xa_init(&adev->userq_doorbell_xa);
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
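
Editor's note: replacing the userq manager list (and its mutex, removed above) with
an XArray suggests lookups keyed by doorbell index; the key choice is an assumption
taken from the field name. A generic sketch of the XArray calls the new field would
rely on:

	/* Generic XArray usage; keying by doorbell index is an assumption. */
	int err;
	void *queue;

	err = xa_err(xa_store(&adev->userq_doorbell_xa, doorbell_index,
			      queue, GFP_KERNEL));	/* insert/overwrite */

	queue = xa_load(&adev->userq_doorbell_xa, doorbell_index); /* lookup */

	xa_erase(&adev->userq_doorbell_xa, doorbell_index);	/* remove */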
@@ -4534,6 +4659,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+ INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
adev->gfx.gfx_off_req_count = 1;
adev->gfx.gfx_off_residency = 0;
@@ -4807,39 +4933,14 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
}
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
/*
* Place those sysfs registering after `late_init`. As some of those
* operations performed in `late_init` might affect the sysfs
* interfaces creating.
*/
- r = amdgpu_atombios_sysfs_init(adev);
- if (r)
- drm_err(&adev->ddev,
- "registering atombios sysfs failed (%d).\n", r);
-
- r = amdgpu_pm_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
-
- r = amdgpu_ucode_sysfs_init(adev);
- if (r) {
- adev->ucode_sysfs_en = false;
- dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
- } else
- adev->ucode_sysfs_en = true;
-
- r = amdgpu_device_attr_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "Could not create amdgpu device attr\n");
-
- r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
- if (r)
- dev_err(adev->dev,
- "Could not create amdgpu board attributes\n");
-
- amdgpu_fru_sysfs_init(adev);
- amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_sysfs_init(adev);
+ r = amdgpu_device_sys_interface_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4867,9 +4968,6 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
- amdgpu_xgmi_reset_on_init(adev);
-
amdgpu_device_check_iommu_direct_map(adev);
adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
@@ -4961,15 +5059,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->pm.sysfs_initialized)
- amdgpu_pm_sysfs_fini(adev);
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- amdgpu_device_attr_sysfs_fini(adev);
- amdgpu_fru_sysfs_fini(adev);
-
- amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_sysfs_fini(adev);
+ amdgpu_device_sys_interface_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -5044,7 +5134,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
amdgpu_discovery_fini(adev);
amdgpu_reset_put_reset_domain(adev->reset_domain);
@@ -5192,7 +5282,7 @@ void amdgpu_device_complete(struct drm_device *dev)
int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- int r = 0;
+ int r, rec;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5208,39 +5298,92 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
return r;
}
- if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
- dev_warn(adev->dev, "smart shift update failed\n");
+ r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
+ if (r)
+ goto unwind_sriov;
if (notify_clients)
- drm_client_dev_suspend(adev_to_drm(adev), false);
+ drm_client_dev_suspend(adev_to_drm(adev));
cancel_delayed_work_sync(&adev->delayed_init_work);
amdgpu_ras_suspend(adev);
- amdgpu_device_ip_suspend_phase1(adev);
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ goto unwind_smartshift;
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
+ r = amdgpu_userq_suspend(adev);
+ if (r)
+ goto unwind_ip_phase1;
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unwind_userq;
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_fence_driver_hw_fini(adev);
- amdgpu_device_ip_suspend_phase2(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
+ if (r)
+ goto unwind_evict;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- r = amdgpu_dpm_notify_rlc_state(adev, false);
- if (r)
+ return 0;
+
+unwind_evict:
+ if (adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ amdgpu_fence_driver_hw_init(adev);
+
+unwind_userq:
+ rec = amdgpu_userq_resume(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
+ return r;
+ }
+ rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
return r;
+ }
- return 0;
+unwind_ip_phase1:
+ /* suspend phase 1 = resume phase 3 */
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
+ return r;
+ }
+
+unwind_smartshift:
+ rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
+ return r;
+ }
+
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev));
+
+ amdgpu_ras_resume(adev);
+
+unwind_sriov:
+ if (amdgpu_sriov_vf(adev)) {
+ rec = amdgpu_virt_request_full_gpu(adev, true);
+ if (rec) {
+ dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
+ return r;
+ }
+ }
+
+ adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
}
static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
@@ -5346,7 +5489,7 @@ exit:
flush_delayed_work(&adev->delayed_init_work);
if (notify_clients)
- drm_client_dev_resume(adev_to_drm(adev), false);
+ drm_client_dev_resume(adev_to_drm(adev));
amdgpu_ras_resume(adev);
@@ -5802,11 +5945,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!amdgpu_ring_sched_ready(ring))
continue;
- /* Clear job fence from fence drv to avoid force_completion
- * leave NULL and vm flush fence in fence drv
- */
- amdgpu_fence_driver_clear_job_fences(ring);
-
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -5951,7 +6089,11 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
- drm_client_dev_resume(adev_to_drm(tmp_adev), false);
+ r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
+ if (r)
+ goto out;
+
+ drm_client_dev_resume(adev_to_drm(tmp_adev));
/*
* The GPU enters bad state once faulty pages
@@ -6173,6 +6315,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
cancel_work(&adev->reset_work);
#endif
+ cancel_work(&adev->userq_reset_work);
if (adev->kfd.dev)
cancel_work(&adev->kfd.reset_work);
@@ -6286,13 +6429,15 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev));
/* disable ras on ALL IPs */
if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
+ amdgpu_userq_pre_reset(tmp_adev);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6522,6 +6667,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
goto end_reset;
}
+ /* Cannot be called after locking reset domain */
+ amdgpu_ras_pre_reset(adev, &device_list);
+
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
@@ -6535,7 +6683,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
- if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ if (job && dma_fence_is_signaled(&job->hw_fence->base)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -6552,6 +6700,7 @@ skip_sched_resume:
amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
reset_unlock:
amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ amdgpu_ras_post_reset(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -7279,10 +7428,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- if (ring && ring->funcs->emit_hdp_flush)
+ if (ring && ring->funcs->emit_hdp_flush) {
amdgpu_ring_emit_hdp_flush(ring);
- else
- amdgpu_asic_flush_hdp(adev, ring);
+ return;
+ }
+
+ if (!ring && amdgpu_sriov_runtime(adev)) {
+ if (!amdgpu_kiq_hdp_flush(adev))
+ return;
+ }
+
+ amdgpu_hdp_flush(adev, ring);
}
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
@@ -7295,7 +7451,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- amdgpu_asic_invalidate_hdp(adev, ring);
+ amdgpu_hdp_invalidate(adev, ring);
}
int amdgpu_in_reset(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 73401f0aeb34..fa2a22dfa048 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -107,6 +107,7 @@
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
/* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
if (discv_regn) {
- memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memcpy(binary, discv_regn, adev->discovery.size);
memunmap(discv_regn);
return 0;
}
@@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
else
vram_size <<= 20;
+ /*
+	 * If in VRAM, the discovery TMR is marked for reservation. If it is in
+	 * system memory, it does not need to be reserved.
+ */
if (sz_valid) {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->mman.discovery_tmr_size, false);
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+			/* For SRIOV VFs with the dynamic critical region enabled,
+			 * fetch the IP discovery binary via the call below.
+			 * If it is disabled, fall through to the normal sequence.
+ */
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
+ &adev->discovery.size)) {
+ dev_err(adev->dev,
+ "failed to read discovery info from dynamic critical region.");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery.size, false);
+ adev->discovery.reserve_tmr = true;
+ }
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
@@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
dev_err(adev->dev,
"failed to read discovery info from memory, vram size read: %llx",
vram_size);
-
+exit:
return ret;
}
@@ -389,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
struct binary_header *bhdr)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct table_info *info;
uint16_t checksum;
uint16_t offset;
@@ -398,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
checksum = le16_to_cpu(info->checksum);
struct nps_info_header *nhdr =
- (struct nps_info_header *)(adev->mman.discovery_bin + offset);
+ (struct nps_info_header *)(discovery_bin + offset);
if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
return -EINVAL;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
le32_to_cpu(nhdr->size_bytes),
checksum)) {
dev_dbg(adev->dev, "invalid nps info data table checksum\n");
@@ -417,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
- if (amdgpu_discovery == 2)
+ if (amdgpu_discovery == 2) {
+		/* Assume there is a valid discovery TMR in VRAM even if the binary is sideloaded */
+ adev->discovery.reserve_tmr = true;
return "amdgpu/ip_discovery.bin";
+ }
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -447,49 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ uint8_t *discovery_bin;
const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
int r;
- adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
- if (!adev->mman.discovery_bin)
+ adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+ if (!adev->discovery.bin)
return -ENOMEM;
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+ discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
drm_dbg(&adev->ddev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+ r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
+ fw_name);
if (r)
goto out;
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
- r = amdgpu_discovery_read_binary_from_mem(
- adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
if (r)
goto out;
}
/* check the ip discovery binary signature */
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
dev_err(adev->dev,
"get invalid ip discovery binary signature\n");
r = -EINVAL;
goto out;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- size, checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
@@ -501,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ (struct ip_discovery_header *)(discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le16_to_cpu(ihdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le16_to_cpu(ihdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@@ -522,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+ (struct gpu_info_header *)(discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
@@ -530,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(ghdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(ghdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@@ -544,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+ (struct harvest_info_header *)(discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
@@ -552,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
+		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+						      sizeof(struct harvest_table),
+						      checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
@@ -566,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+ (struct vcn_info_header *)(discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
@@ -574,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
+		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+						      le32_to_cpu(vhdr->size_bytes),
+						      checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
@@ -588,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (0 && offset) {
struct mall_info_header *mhdr =
- (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+ (struct mall_info_header *)(discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
@@ -596,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
+		if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+						      le32_to_cpu(mhdr->size_bytes),
+						      checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
@@ -607,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
if ((amdgpu_discovery != 2) &&
(RREG32(mmIP_DISCOVERY_VERSION) == 4))
amdgpu_ras_query_boot_status(adev, 4);
@@ -620,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
amdgpu_discovery_sysfs_fini(adev);
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
}
static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
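
Editor's note: across these hunks the discovery state moves out of adev->mman into a
dedicated adev->discovery. The fields touched in this diff suggest roughly the shape
below; the struct name and field order are hypothetical, assembled from the accesses
seen here rather than from the actual declaration:

	/* Sketch of the new per-device discovery state, inferred from this diff. */
	struct amdgpu_discovery_info {
		uint8_t *bin;				/* IP discovery binary */
		uint32_t size;				/* binary/TMR size */
		bool reserve_tmr;			/* reserve TMR region in VRAM */
		struct debugfs_blob_wrapper debugfs_blob; /* amdgpu_discovery blob */
		struct ip_discovery_top *ip_top;	/* sysfs hierarchy root */
	};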
@@ -646,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
@@ -655,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint8_t inst;
int i, j;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+	ihdr = (struct ip_discovery_header *)(discovery_bin +
+			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
/* scan harvest bit of all IP data structures */
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin +
- ip_offset);
+ ip = (struct ip *)(discovery_bin + ip_offset);
inst = ip->number_instance;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
@@ -711,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
uint32_t umc_harvest_config = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
@@ -725,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
return;
}
- harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+ harvest_info = (struct harvest_table *)(discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
@@ -1021,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj)
kobj);
struct amdgpu_device *adev = ip_top->adev;
- adev->ip_top = NULL;
kfree(ip_top);
+ adev->discovery.ip_top = NULL;
}
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
@@ -1033,7 +1070,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
/* Until a uniform way is figured, get mask based on hwid */
switch (hw_id) {
case VCN_HWID:
- harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ /* VCN vs UVD+VCE */
+ if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
break;
case DMU_HWID:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
@@ -1060,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
const size_t _ip_offset, const int num_ips,
bool reg_base_64)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
int ii, jj, kk, res;
uint16_t hw_id;
uint8_t inst;
@@ -1077,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
@@ -1164,17 +1204,20 @@ next_ip:
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct kset *die_kset = &adev->ip_top->die_kset;
+ struct kset *die_kset = &ip_top->die_kset;
u16 num_dies, die_offset, num_ips;
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+	ihdr = (struct ip_discovery_header *)(discovery_bin +
+			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1183,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
struct ip_die_entry *ip_die_entry;
die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1217,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
+ struct ip_discovery_top *ip_top;
struct kset *die_kset;
int res, ii;
- if (!adev->mman.discovery_bin)
+ if (!discovery_bin)
return -EINVAL;
- adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
- if (!adev->ip_top)
+ ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
+ if (!ip_top)
return -ENOMEM;
- adev->ip_top->adev = adev;
-
- res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ ip_top->adev = adev;
+ adev->discovery.ip_top = ip_top;
+ res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
&adev->dev->kobj, "ip_discovery");
if (res) {
DRM_ERROR("Couldn't init and add ip_discovery/");
goto Err;
}
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
kobject_set_name(&die_kset->kobj, "%s", "die");
- die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.parent = &ip_top->kobj;
die_kset->kobj.ktype = &die_kobj_ktype;
- res = kset_register(&adev->ip_top->die_kset);
+ res = kset_register(&ip_top->die_kset);
if (res) {
DRM_ERROR("Couldn't register die_kset");
goto Err;
@@ -1254,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
return res;
Err:
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->kobj);
return res;
}
@@ -1299,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
struct list_head *el, *tmp;
struct kset *die_kset;
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
list_del_init(el);
@@ -1311,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
spin_lock(&die_kset->list_lock);
}
spin_unlock(&die_kset->list_lock);
- kobject_put(&adev->ip_top->die_kset.kobj);
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->die_kset.kobj);
+ kobject_put(&ip_top->kobj);
}
/* ================================================== */
@@ -1323,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
+ uint8_t *discovery_bin;
struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
@@ -1338,22 +1385,23 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
r = amdgpu_discovery_init(adev);
if (r)
return r;
-
+ discovery_bin = adev->discovery.bin;
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+	ihdr = (struct ip_discovery_header *)(discovery_bin +
+			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1367,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
@@ -1517,16 +1565,16 @@ next_ip:
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct ip_discovery_header *ihdr;
struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
uint16_t offset, ihdr_ver;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- offset);
+ ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@@ -1573,22 +1621,23 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
- gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+ gc_info = (union gc_info *)(discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
@@ -1681,24 +1730,25 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
- mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+ mall_info = (union mall_info *)(discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
@@ -1737,12 +1787,13 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
@@ -1757,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
- vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+ vcn_info = (union vcn_info *)(discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
@@ -1823,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
@@ -1839,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return r;
nps_info = &nps_data;
} else {
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
dev_err(adev->dev,
"fetch mem range failed, ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
if (!offset)
@@ -1855,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
return -ENOENT;
- nps_info =
- (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info = (union nps_info *)(discovery_bin + offset);
}
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
@@ -2359,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_version(adev, SDMA0_HWIP, 0));
return -EINVAL;
}
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -2565,7 +2631,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
@@ -2592,7 +2660,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
@@ -2619,8 +2689,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
+ adev->sdma.sdma_mask = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
+ adev->gfx.xcc_mask = 1;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
@@ -2665,7 +2737,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
@@ -2693,8 +2767,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
+ adev->sdma.sdma_mask = 0xff;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
@@ -2726,8 +2802,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
+ adev->sdma.sdma_mask = 0x1f;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
@@ -2762,6 +2840,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
} else {
cyan_skillfish_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
@@ -3125,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_discovery_set_ras_ip_blocks(adev);
+ if (r)
+ return r;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index b44d56465c5b..4ce04486cc31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,9 +24,21 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#include <linux/debugfs.h>
+
#define DISCOVERY_TMR_SIZE (10 << 10)
#define DISCOVERY_TMR_OFFSET (64 << 10)
+struct ip_discovery_top;
+
+struct amdgpu_discovery_info {
+ struct debugfs_blob_wrapper debugfs_blob;
+ struct ip_discovery_top *ip_top;
+ uint32_t size;
+ uint8_t *bin;
+ bool reserve_tmr;
+};
+
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
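As a hedged sketch of how the new struct's blob wrapper might be wired up (the debugfs file name and parent directory below are assumptions for illustration, not taken from this series):

/* Sketch only: expose the raw discovery binary via the blob wrapper. */
struct drm_minor *minor = adev_to_drm(adev)->primary;

adev->discovery.debugfs_blob.data = adev->discovery.bin;
adev->discovery.debugfs_blob.size = adev->discovery.size;
debugfs_create_blob("amdgpu_discovery", 0444, minor->debugfs_root,
		    &adev->discovery.debugfs_blob);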
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 51bab32fd8c6..b5d34797d606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
* take the current one
@@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
+/**
+ * DOC: property for adaptive backlight modulation
+ *
+ * The 'adaptive backlight modulation' property allows the compositor to
+ * directly control the adaptive backlight modulation power-savings feature
+ * that is part of DCN hardware.
+ *
+ * The property will be attached specifically to eDP panels that support it.
+ *
+ * The property defaults to 'sysfs', which allows the sysfs file
+ * 'panel_power_savings' to control it.
+ * If set to 'off', the compositor ensures ABM stays off.
+ * The other values 'min', 'bias min', 'bias max', and 'max' control the
+ * intensity of the power savings.
+ *
+ * Modifying this value can have implications for color accuracy, so tread
+ * carefully.
+ */
+static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
+{
+ static const struct drm_prop_enum_list props[] = {
+ { ABM_SYSFS_CONTROL, "sysfs" },
+ { ABM_LEVEL_OFF, "off" },
+ { ABM_LEVEL_MIN, "min" },
+ { ABM_LEVEL_BIAS_MIN, "bias min" },
+ { ABM_LEVEL_BIAS_MAX, "bias max" },
+ { ABM_LEVEL_MAX, "max" },
+ };
+ struct drm_property *prop;
+ int i;
+
+ if (!adev->dc_enabled)
+ return 0;
+
+ prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
+ "adaptive backlight modulation",
+ ARRAY_SIZE(props));
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ ret = drm_property_add_enum(prop, props[i].type, props[i].name);
+ if (ret) {
+ drm_property_destroy(adev_to_drm(adev), prop);
+ return ret;
+ }
+ }
+
+ adev->mode_info.abm_level_property = prop;
+
+ return 0;
+}
+
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
@@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- return 0;
+ return amdgpu_display_setup_abm_prop(adev);
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 930c171473b4..49a29bf47a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev);
int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb);
+#define ABM_SYSFS_CONTROL (-1)
+#define ABM_LEVEL_OFF 0
+#define ABM_LEVEL_MIN 1
+#define ABM_LEVEL_BIAS_MIN 2
+#define ABM_LEVEL_BIAS_MAX 3
+#define ABM_LEVEL_MAX 4
+
#endif
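To illustrate the new connector property from userspace, here is a hedged compositor-side sketch using libdrm. The property name matches the DOC text above; the fd/connector handling and helper name are assumptions, not part of this patch.

#include <stdint.h>
#include <string.h>
#include <xf86drmMode.h>

/* Hypothetical helper: set "adaptive backlight modulation" on a connector.
 * The value should be one of the ABM_* enum values exposed by the driver. */
static int set_abm_level(int fd, uint32_t connector_id, uint64_t level)
{
	drmModeObjectProperties *props;
	drmModePropertyRes *prop;
	uint32_t i;
	int ret = -1;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		prop = drmModeGetProperty(fd, props->props[i]);
		if (!prop)
			continue;
		if (!strcmp(prop->name, "adaptive backlight modulation"))
			ret = drmModeObjectSetProperty(fd, connector_id,
						       DRM_MODE_OBJECT_CONNECTOR,
						       prop->prop_id, level);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}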
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8561ad7f6180..e22cfa7c6d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -81,13 +81,44 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ /*
+ * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+ * Such buffers cannot be safely accessed over P2P because their
+ * compression metadata is device local, so fall back to the
+ * system-memory path instead. This applies when the device supports
+ * GFX12 (GC 12.x or newer) and the BO was created with the
+ * AMDGPU_GEM_CREATE_GFX12_DCC flag.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+ bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ attach->peer2peer = false;
+
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ r = dma_resv_lock(bo->tbo.base.resv, NULL);
+ if (r)
+ return r;
+
amdgpu_vm_bo_update_shared(bo);
+ dma_resv_unlock(bo->tbo.base.resv);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index bff25ef3e2d0..2dfbddcef9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
AMDGPU_DEBUG_VM_USERPTR = BIT(8),
- AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9),
+ AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -311,7 +312,7 @@ module_param_named(moverate, amdgpu_moverate, int, 0600);
* DOC: audio (int)
* Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disable it.
*/
-MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+MODULE_PARM_DESC(audio, "HDMI/DP Audio enable for non DC displays (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
/**
@@ -353,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
- * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
- * multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to the default timeout.
+ * The format can be [single value] for setting all timeouts at once or
+ * [GFX,Compute,SDMA,Video] to set individual timeouts.
+ * Negative values mean infinity.
*
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX.
- * The second one is for Compute. The third and fourth ones are
- * for SDMA and Video.
- *
- * By default(with no lockup_timeout settings), the timeout for all jobs is 10000.
+ * By default (with no lockup_timeout setting), the timeout for all queues is 2000.
*/
MODULE_PARM_DESC(lockup_timeout,
- "GPU lockup timeout in ms (default: 10000 for all jobs. "
- "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
-module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
+ "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video].");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout,
+ sizeof(amdgpu_lockup_timeout), 0444);
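As a usage illustration of the reworked format (the values are examples only, not from the patch): on the kernel command line, a single value applies to every queue type, while a comma-separated list sets them individually:

amdgpu.lockup_timeout=10000                    (all queues)
amdgpu.lockup_timeout=2000,10000,10000,10000   (GFX,Compute,SDMA,Video)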
/**
* DOC: dpm (int)
@@ -623,39 +618,39 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
/**
* DOC: si_support (int)
- * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * SI (Southern Islands) are first generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support SI.
+ * By default, SI dedicated GPUs are supported by amdgpu.
+ * Only relevant when SI support is built into amdgpu (CONFIG_DRM_AMDGPU_SI).
+ * See also radeon.si_support which should be disabled when amdgpu.si_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_si_support = -1;
#ifdef CONFIG_DRM_AMDGPU_SI
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_si_support = 1;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
/**
* DOC: cik_support (int)
- * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. For CIK asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * CIK (Sea Islands) are second generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support CIK.
+ * By default:
+ * - CIK dedicated GPUs are supported by amdgpu.
+ * - CIK APUs are supported by radeon (except when radeon is not built).
+ * Only relevant when CIK support is built into amdgpu (CONFIG_DRM_AMDGPU_CIK).
+ * See also radeon.cik_support which should be disabled when amdgpu.cik_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_cik_support = -1;
#ifdef CONFIG_DRM_AMDGPU_CIK
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_cik_support = 1;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
@@ -2233,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
adev->pdev->bus->number, i);
if (p) {
pm_runtime_get_sync(&p->dev);
- pm_runtime_mark_last_busy(&p->dev);
pm_runtime_put_autosuspend(&p->dev);
pci_dev_put(p);
}
@@ -2289,6 +2283,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: disable kernel logs of correctable errors\n");
adev->debug_disable_ce_logs = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) {
+ pr_info("debug: allowing command submission to CE engine\n");
+ adev->debug_enable_ce_cs = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2307,6 +2306,72 @@ static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long fl
return flags;
}
+static bool amdgpu_support_enabled(struct device *dev,
+ const enum amd_asic_type family)
+{
+ const char *gen;
+ const char *param;
+ int module_param = -1;
+ bool radeon_support_built = IS_ENABLED(CONFIG_DRM_RADEON);
+ bool amdgpu_support_built = false;
+ bool support_by_default = false;
+
+ switch (family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ gen = "SI";
+ param = "si_support";
+ module_param = amdgpu_si_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_SI);
+ support_by_default = true;
+ break;
+
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ support_by_default = true;
+ fallthrough;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ gen = "CIK";
+ param = "cik_support";
+ module_param = amdgpu_cik_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_CIK);
+ break;
+
+ default:
+ /* All other chips are supported by amdgpu only */
+ return true;
+ }
+
+ if (!amdgpu_support_built) {
+ dev_info(dev, "amdgpu built without %s support\n", gen);
+ return false;
+ }
+
+ if ((module_param == -1 && (support_by_default || !radeon_support_built)) ||
+ module_param == 1) {
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by amdgpu.\n"
+ "Use radeon.%s=1 amdgpu.%s=0 to override.\n",
+ gen, param, param);
+
+ return true;
+ }
+
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by radeon.\n"
+ "Use radeon.%s=0 amdgpu.%s=1 to override.\n",
+ gen, param, param);
+ else if (module_param == 0)
+ dev_info(dev, "%s support disabled by module param\n", gen);
+
+ return false;
+}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2354,48 +2419,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP;
}
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- dev_info(&pdev->dev,
- "SI support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- break;
-#else
- dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
- return -ENODEV;
-#endif
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- dev_info(&pdev->dev,
- "CIK support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- break;
-#else
- dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
+ if (!amdgpu_support_enabled(&pdev->dev, flags & AMD_ASIC_MASK))
return -ENODEV;
-#endif
- default:
- break;
- }
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
@@ -2474,7 +2499,6 @@ retry_init:
pm_runtime_allow(ddev->dev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
@@ -2558,7 +2582,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- amdgpu_device_ip_suspend(adev);
+ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -2626,9 +2651,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- if (amdgpu_acpi_should_gpu_reset(adev))
- return amdgpu_asic_reset(adev);
+ if (amdgpu_acpi_should_gpu_reset(adev)) {
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ return r;
+ }
return 0;
}
@@ -2771,22 +2801,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- ret = -EBUSY;
- goto done;
- }
- }
-done:
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY;
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -2933,7 +2949,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
ret = amdgpu_runtime_idle_check_userq(dev);
done:
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
@@ -2969,7 +2984,6 @@ long amdgpu_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index fd8cca241da6..c7843e336310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -45,16 +45,11 @@
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
-static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops ||
- __f->base.ops == &amdgpu_job_fence_ops)
- return __f;
-
- return NULL;
+ return __f;
}
/**
@@ -98,51 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @f: resulting fence object
* @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
- struct amdgpu_fence *af, unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
- struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
- if (!af) {
- /* create a separate hw fence */
- am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
- if (!am_fence)
- return -ENOMEM;
- } else {
- am_fence = af;
- }
- fence = &am_fence->base;
- am_fence->ring = ring;
+ fence = &af->base;
+ af->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- am_fence->seq = seq;
- if (af) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
- amdgpu_fence_save_wptr(fence);
+ amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -167,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
- *f = fence;
-
return 0;
}
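With the reworked contract, the caller owns the struct amdgpu_fence before emission. A minimal hedged caller sketch follows (mirroring what amdgpu_ib_schedule does later in this series; error handling is trimmed and the GFP flag depends on the calling context):

struct amdgpu_fence *af;
int r;

af = kzalloc(sizeof(*af), GFP_KERNEL);	/* caller-side allocation */
if (!af)
	return -ENOMEM;

r = amdgpu_fence_emit(ring, af, 0);
if (r) {
	kfree(af);	/* assumption: not yet a live dma_fence on failure */
	return r;
}
/* once emitted, release the fence through dma_fence_put(&af->base) */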
@@ -276,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
@@ -670,36 +643,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
- *
- * @ring: fence of the ring to be cleared
- *
- */
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
-{
- int i;
- struct dma_fence *old, **ptr;
-
- for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
- ptr = &ring->fence_drv.fences[i];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && old->ops == &amdgpu_job_fence_ops) {
- struct amdgpu_job *job;
-
- /* For non-scheduler bad job, i.e. failed ib test, we need to signal
- * it right here or we won't be able to track them in fence_drv
- * and they will remain unsignaled during sa_bo free.
- */
- job = container_of(old, struct amdgpu_job, hw_fence.base);
- if (!job->base.s_fence && !dma_fence_is_signaled(old))
- dma_fence_signal(old);
- RCU_INIT_POINTER(*ptr, NULL);
- dma_fence_put(old);
- }
- }
-}
-
-/**
* amdgpu_fence_driver_set_error - set error code on fences
* @ring: the ring which contains the fences
* @error: the error code to set
@@ -755,21 +698,50 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
/**
* amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
*
- * @fence: fence of the ring to signal
+ * @af: fence of the ring to signal
*
*/
-void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
{
- dma_fence_set_error(&fence->base, -ETIME);
- amdgpu_fence_write(fence->ring, fence->seq);
- amdgpu_fence_process(fence->ring);
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ struct amdgpu_ring *ring = af->ring;
+ unsigned long flags;
+ u32 seq, last_seq;
+
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+
+ /* mark all fences from the guilty context with an error */
+ spin_lock_irqsave(&ring->fence_drv.lock, flags);
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ if (fence == af)
+ dma_fence_set_error(&fence->base, -ETIME);
+ else if (fence->context == af->context)
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ }
+ rcu_read_unlock();
+ } while (last_seq != seq);
+ spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
+ /* signal the guilty fence */
+ amdgpu_fence_write(ring, (u32)af->base.seqno);
+ amdgpu_fence_process(ring);
}
-void amdgpu_fence_save_wptr(struct dma_fence *fence)
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
{
- struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
-
- am_fence->wptr = am_fence->ring->wptr;
+ af->wptr = af->ring->wptr;
}
static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
@@ -790,14 +762,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
struct dma_fence *unprocessed;
struct dma_fence __rcu **ptr;
struct amdgpu_fence *fence;
- u64 wptr, i, seqno;
+ u64 wptr;
+ u32 seq, last_seq;
- seqno = amdgpu_fence_read(ring);
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
wptr = ring->fence_drv.signalled_wptr;
ring->ring_backup_entries_to_copy = 0;
- for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
- ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
rcu_read_lock();
unprocessed = rcu_dereference(*ptr);
@@ -813,7 +790,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
wptr = fence->wptr;
}
rcu_read_unlock();
- }
+ } while (last_seq != seq);
}
/*
@@ -830,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
return (const char *)to_amdgpu_fence(f)->ring->name;
}
-static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
-
- return (const char *)to_amdgpu_ring(job->base.sched)->name;
-}
-
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
* @f: fence
@@ -854,23 +824,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
}
/**
- * amdgpu_job_fence_enable_signaling - enable signalling on job fence
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_enable_signaling above, it
- * only handles the job embedded fence.
- */
-static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
-
- if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
-
- return true;
-}
-
-/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -886,21 +839,6 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
}
/**
- * amdgpu_job_fence_free - free up the job with embedded fence
- *
- * @rcu: RCU callback head
- *
- * Free up the job with embedded fence after the RCU grace period.
- */
-static void amdgpu_job_fence_free(struct rcu_head *rcu)
-{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-
- /* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence.base));
-}
-
-/**
* amdgpu_fence_release - callback that fence can be freed
*
* @f: fence
@@ -913,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
-/**
- * amdgpu_job_fence_release - callback that job embedded fence can be freed
- *
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_release above, it
- * only handles the job embedded fence.
- */
-static void amdgpu_job_fence_release(struct dma_fence *f)
-{
- call_rcu(&f->rcu, amdgpu_job_fence_free);
-}
-
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -933,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
-static const struct dma_fence_ops amdgpu_job_fence_ops = {
- .get_driver_name = amdgpu_fence_get_driver_name,
- .get_timeline_name = amdgpu_job_fence_get_timeline_name,
- .enable_signaling = amdgpu_job_fence_enable_signaling,
- .release = amdgpu_job_fence_release,
-};
-
/*
* Fence debugfs
*/
@@ -1009,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val)
*val = atomic_read(&adev->reset_domain->reset_res);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b2033f8352f5..d2237ce9da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
- unsigned p;
int i, j;
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
@@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++) {
+ for (i = 0; i < pages; i++) {
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
@@ -370,6 +368,42 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
}
/**
+ * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table
+ *
+ * @adev: amdgpu_device pointer
+ * @pa: physical address of the first page to be mapped
+ * @start_page: first page to map in the GART aperture
+ * @num_pages: number of pages to be mapped
+ * @flags: page table entry flags
+ * @dst: CPU address of the GART table
+ *
+ * Binds a BO that is allocated in VRAM to the GART page table
+ * (all ASICs).
+ *
+ * Useful when a kernel BO is located in VRAM but
+ * needs to be accessed from the GART address space.
+ */
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst)
+{
+ u32 i, idx;
+
+ /* The SYSTEM flag indicates the pages aren't in VRAM. */
+ WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM);
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ for (i = 0; i < num_pages; ++i) {
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
+ }
+
+ drm_dev_exit(idx);
+}
+
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 7cc980bf4725..d3118275ddae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -64,5 +64,8 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
void *dst);
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
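As a hedged usage sketch of the new helper: map a kernel BO that lives in VRAM into a window of the GART aperture. The offset math, PTE flags and the reserved gart_page window below are assumptions for illustration, not taken from this patch.

/* Sketch: bo is a VRAM-resident kernel BO; gart_page is a window the
 * caller reserved in the GART address space (both assumed). */
uint64_t pa = amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start;
uint64_t npages = amdgpu_bo_size(bo) >> AMDGPU_GPU_PAGE_SHIFT;
uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
		 AMDGPU_PTE_WRITEABLE;

amdgpu_gart_map_vram_range(adev, pa, gart_page, npages, flags,
			   adev->gart.ptr);
amdgpu_gart_invalidate_tlb(adev);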
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b7ebae289bea..3e38c5db2987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
+ ttm_bo_fini(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -531,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_userptr *args = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, &range);
- if (r)
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range))
+ return -ENOMEM;
+ r = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (r) {
+ amdgpu_hmm_range_free(range);
goto release_object;
-
+ }
r = amdgpu_bo_reserve(bo, true);
if (r)
goto user_pages_done;
@@ -597,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+ amdgpu_hmm_range_free(range);
release_object:
drm_gem_object_put(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ebe2b4c68b0f..8b118c53f351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_mes.h"
#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
@@ -1194,6 +1195,75 @@ failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
+ return amdgpu_mes_hdp_flush(adev);
+
+ if (!ring->funcs->emit_hdp_flush)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_hdp_flush(ring);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't wait any longer in the GPU reset case because that may
+ * block the gpu_recover() routine forever, e.g. when this flush
+ * is triggered from TTM and ttm_bo_lock_delayed_workqueue() would
+ * never return while we keep waiting here, causing gpu_recover()
+ * to hang.
+ *
+ * Also don't wait any longer when in IRQ context.
+ */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_hdp_flush;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_hdp_flush;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
+ dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_hdp_flush:
+ dev_err(adev->dev, "failed to flush HDP via KIQ\n");
+ return r < 0 ? r : -EIO;
+}
+
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
if (amdgpu_num_kcq == -1) {
@@ -1600,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index fb5f7a0ee029..efd61a1ccc66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 9dcf51991b5b..869bceb0fe2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -597,6 +597,9 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
/* reserve engine 5 for firmware */
if (adev->enable_mes)
vm_inv_engs[i] &= ~(1 << 5);
+ /* reserve engine 6 for uni mes */
+ if (adev->enable_uni_mes)
+ vm_inv_engs[i] &= ~(1 << 6);
/* reserve mmhub engine 3 for firmware */
if (adev->enable_umsch_mm)
vm_inv_engs[i] &= ~(1 << 3);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 55097ca10738..727342689d4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -86,6 +86,11 @@ enum amdgpu_memory_partition {
#define AMDGPU_MAX_MEM_RANGES 8
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10
+
/*
* GMC page fault information
*/
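A small hedged decode helper for the new fault-source bits; that a handler extracts a source byte like this is an assumption for illustration, not something this hunk adds.

/* Sketch: classify a GMC9 fault source value (field extraction assumed). */
static const char *gmc9_fault_source_str(u32 src)
{
	if (src & AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE)
		return "write";
	if (src & AMDGPU_GMC9_FAULT_SOURCE_DATA_READ)
		return "read";
	if (src & AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE)
		return "execute";
	if (src & AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY)
		return "retry";
	return "unknown";
}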
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0760e70402ec..895c1e4c6747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -284,6 +284,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ start += amdgpu_vce_required_gart_pages(adev);
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index 6e02fb9ac2f6..5a60d69a3e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -66,3 +66,19 @@ void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
0);
}
}
+
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->invalidate_hdp)
+ adev->asic_funcs->invalidate_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->invalidate_hdp)
+ adev->hdp.funcs->invalidate_hdp(adev, ring);
+}
+
+void amdgpu_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->flush_hdp)
+ adev->asic_funcs->flush_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
+ adev->hdp.funcs->flush_hdp(adev, ring);
+}
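Call sites can then use these wrappers instead of open-coding the asic_funcs/hdp.funcs fallback. A minimal sketch; passing a NULL ring for CPU-initiated flushes follows existing driver practice and is shown here as an assumption:

/* Flush HDP for a CPU-initiated access; NULL ring selects the MMIO path. */
amdgpu_hdp_flush(adev, NULL);

/* Invalidate the HDP read cache before the CPU reads data the GPU wrote. */
amdgpu_hdp_invalidate(adev, NULL);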
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 4cfd932b7e91..d9f488fa76b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -46,4 +46,8 @@ struct amdgpu_hdp {
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_hdp_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 2c6a6b858112..90d26d820bac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -168,17 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range **phmm_range)
+ struct amdgpu_hmm_range *range)
{
- struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
unsigned long *pfns;
int r = 0;
-
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
- if (unlikely(!hmm_range))
- return -ENOMEM;
+ struct hmm_range *hmm_range = &range->hmm_range;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
@@ -221,28 +217,77 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- *phmm_range = hmm_range;
-
return 0;
out_free_pfns:
kvfree(pfns);
+ hmm_range->hmm_pfns = NULL;
out_free_range:
- kfree(hmm_range);
-
if (r == -EBUSY)
r = -EAGAIN;
return r;
}
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+/**
+ * amdgpu_hmm_range_valid - check if an HMM range is still valid
+ * @range: pointer to the &struct amdgpu_hmm_range to validate
+ *
+ * Determines whether the given HMM range @range is still valid by
+ * checking for invalidations via the MMU notifier sequence. This is
+ * typically used to verify that the range has not been invalidated
+ * by concurrent address space updates before it is accessed.
+ *
+ * Return:
+ * * true if @range is valid and can be used safely
+ * * false if @range is NULL or has been invalidated
+ */
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
- bool r;
+ if (!range)
+ return false;
- r = mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
- kvfree(hmm_range->hmm_pfns);
- kfree(hmm_range);
+ return !mmu_interval_read_retry(range->hmm_range.notifier,
+ range->hmm_range.notifier_seq);
+}
- return r;
+/**
+ * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range
+ * @bo: optional buffer object to associate with this HMM range
+ *
+ * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed.
+ * The reference count of the @bo is incremented.
+ *
+ * Return:
+ * Pointer to a newly allocated struct amdgpu_hmm_range on success,
+ * or NULL if memory allocation fails.
+ */
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ struct amdgpu_hmm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ range->bo = amdgpu_bo_ref(bo);
+ return range;
+}
+
+/**
+ * amdgpu_hmm_range_free - release an AMDGPU HMM range
+ * @range: pointer to the range object to free
+ *
+ * Releases all resources held by @range, including the associated
+ * hmm_pfns array, and drops the reference on the associated bo, if any.
+ *
+ * Return: void
+ */
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
+{
+ if (!range)
+ return;
+
+ kvfree(range->hmm_range.hmm_pfns);
+ amdgpu_bo_unref(&range->bo);
+ kfree(range);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index 953e1d06de20..140bc9cd57b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -31,13 +31,20 @@
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
+struct amdgpu_hmm_range {
+ struct hmm_range hmm_range;
+ struct amdgpu_bo *bo;
+};
+
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range **phmm_range);
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+ struct amdgpu_hmm_range *range);
#if defined(CONFIG_HMM_MIRROR)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
@@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
+
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
+{
+ return false;
+}
+
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ return NULL;
+}
+
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
#endif
#endif
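Putting the new API together, a hedged lifecycle sketch (mirroring the amdgpu_gem.c conversion above; locking and the surrounding context are elided, and the retry policy is an assumption):

struct amdgpu_hmm_range *range;
int r;

range = amdgpu_hmm_range_alloc(bo);	/* takes a reference on bo */
if (!range)
	return -ENOMEM;

r = amdgpu_hmm_range_get_pages(&bo->notifier, start, npages,
			       readonly, owner, range);
if (r) {
	amdgpu_hmm_range_free(range);
	return r;
}

/* ... consume range->hmm_range.hmm_pfns ... */

if (!amdgpu_hmm_range_valid(range))
	r = -EAGAIN;	/* invalidated concurrently, caller should retry */

amdgpu_hmm_range_free(range);	/* frees the pfns and drops the bo ref */
return r;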
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7d9bcb72e8dd..586a58facca1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,17 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (job) {
vm = job->vm;
fence_ctx = job->base.s_fence ?
- job->base.s_fence->scheduled.context : 0;
+ job->base.s_fence->finished.context : 0;
shadow_va = job->shadow_va;
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
- af = &job->hw_fence;
+ af = job->hw_fence;
/* Save the context of the job for reset handling.
* The driver needs this so it can skip the ring
* contents for guilty contexts.
*/
- af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ af->context = fence_ctx;
+ /* the vm fence is also part of the job's context */
+ job->hw_vm_fence->context = fence_ctx;
} else {
vm = NULL;
fence_ctx = 0;
@@ -167,23 +169,28 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
- af = NULL;
+ af = kzalloc(sizeof(*af), GFP_ATOMIC);
+ if (!af)
+ return -ENOMEM;
}
if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
(!ring->funcs->secure_submission_supported)) {
dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
alloc_size = ring->funcs->emit_frame_size + num_ibs *
@@ -192,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
- return r;
+ goto free_fence;
}
need_ctx_switch = ring->current_ctx != fence_ctx;
@@ -289,7 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, af, fence_flags);
+ r = amdgpu_fence_emit(ring, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -297,6 +304,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_undo(ring);
return r;
}
+ *f = &af->base;
+ /* get a ref for the job */
+ if (job)
+ dma_fence_get(*f);
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
@@ -317,12 +328,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
* fence so we know what rings contents to backup
* after we reset the queue.
*/
- amdgpu_fence_save_wptr(*f);
+ amdgpu_fence_save_wptr(af);
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
return 0;
+
+free_fence:
+ if (!job)
+ kfree(af);
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3ef5bc95642c..9cab36322c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence **fences;
- unsigned i;
+ /* If anybody is waiting for a VMID let everybody wait for fairness */
if (!dma_fence_is_signaled(ring->vmid_wait)) {
*fence = dma_fence_get(ring->vmid_wait);
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
- if (!fences)
- return -ENOMEM;
-
/* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+ list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
/* Don't use per engine and per process VMID at the same time */
struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
NULL : ring;
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
- if (!fences[i])
- break;
- ++i;
+ *fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ if (!(*fence))
+ return 0;
}
- /* If we can't find a idle VMID to use, wait till one becomes available */
- if (&(*idle)->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- *idle = NULL;
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- return -ENOMEM;
- }
-
- *fence = dma_fence_get(&array->base);
- dma_fence_put(ring->vmid_wait);
- ring->vmid_wait = &array->base;
- return 0;
- }
- kfree(fences);
+ /*
+ * If we can't find an idle VMID to use, wait on a fence from the least
+ * recently used in the hope that it will be available soon.
+ */
+ *idle = NULL;
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = dma_fence_get(*fence);
+ /* This is the reference we return */
+ dma_fence_get(*fence);
return 0;
}
@@ -313,7 +289,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
return r;
@@ -373,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
*/
r = amdgpu_sync_fence(&(*id)->active,
&job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
return r;
@@ -426,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
&job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 9cddbf50442a..37270c4dab8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -280,6 +280,8 @@ int isp_kernel_buffer_alloc(struct device *dev, u64 size,
if (ret)
return ret;
+ /* Ensure *bo is NULL so a new BO will be created */
+ *bo = NULL;
ret = amdgpu_bo_create_kernel(adev,
size,
ISP_MC_ADDR_ALIGN,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d020a890a0ea..0a0dcbf0798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -130,14 +130,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
/* attempt a per ring reset */
- if (unlikely(adev->debug_disable_gpu_ring_reset)) {
- dev_err(adev->dev, "Ring reset disabled by debug mask\n");
- } else if (amdgpu_gpu_recovery &&
- amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
- ring->funcs->reset) {
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
dev_err(adev->dev, "Starting %s ring reset\n",
s_job->sched->name);
- r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
+ r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
if (!r) {
atomic_inc(&ring->adev->gpu_reset_counter);
dev_err(adev->dev, "Ring %s reset succeeded\n",
@@ -186,6 +184,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned int num_ibs, struct amdgpu_job **job,
u64 drm_client_id)
{
+ struct amdgpu_fence *af;
+ int r;
+
if (num_ibs == 0)
return -EINVAL;
@@ -193,6 +194,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_job;
+ }
+ (*job)->hw_fence = af;
+
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_fence;
+ }
+ (*job)->hw_vm_fence = af;
+
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -204,6 +219,14 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return drm_sched_job_init(&(*job)->base, entity, 1, owner,
drm_client_id);
+
+err_fence:
+ kfree((*job)->hw_fence);
+err_job:
+ kfree(*job);
+ *job = NULL;
+
+ return r;
}
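The two kzalloc'ed fences above are unwound in reverse allocation order on failure. A compact, runnable userspace sketch of the same goto-based cleanup ladder (struct and function names are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct fence { int dummy; };
struct job { struct fence *hw_fence, *hw_vm_fence; };

/* Each allocation that succeeded before a later failure is released
 * in reverse order, mirroring the err_fence/err_job labels above. */
static int job_alloc(struct job **job)
{
	int r;

	*job = calloc(1, sizeof(**job));
	if (!*job)
		return -1;

	(*job)->hw_fence = calloc(1, sizeof(struct fence));
	if (!(*job)->hw_fence) {
		r = -1;
		goto err_job;
	}

	(*job)->hw_vm_fence = calloc(1, sizeof(struct fence));
	if (!(*job)->hw_vm_fence) {
		r = -1;
		goto err_fence;
	}
	return 0;

err_fence:
	free((*job)->hw_fence);
err_job:
	free(*job);
	*job = NULL;
	return r;
}

int main(void)
{
	struct job *job = NULL;
	int r = job_alloc(&job);

	printf("alloc: %d\n", r);
	if (!r) {
		free(job->hw_vm_fence);
		free(job->hw_fence);
		free(job);
	}
	return 0;
}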
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -223,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
if (r) {
if (entity)
drm_sched_job_cleanup(&(*job)->base);
+ kfree((*job)->hw_vm_fence);
+ kfree((*job)->hw_fence);
kfree(*job);
+ *job = NULL;
}
return r;
@@ -251,11 +277,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
- /* Check if any fences where initialized */
+ /* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.base.ops)
- f = &job->hw_fence.base;
+ else if (job->hw_fence && job->hw_fence->base.ops)
+ f = &job->hw_fence->base;
else
f = NULL;
@@ -271,11 +297,16 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
- /* only put the hw fence if has embedded fence */
- if (!job->hw_fence.base.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence.base);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -304,10 +335,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.base.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence.base);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
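Both free paths apply the same ownership rule: once a fence has been initialized (base.ops set by dma_fence_init()), it is reference counted and must be released with dma_fence_put(); a fence that was only allocated can simply be kfree'd. A rough userspace model of that rule, with hypothetical stand-ins for the kernel types:

#include <stdio.h>
#include <stdlib.h>

struct fence {
	const void *ops;	/* non-NULL once "dma_fence_init()" has run */
	int refcount;
};

/* Initialized fences are refcounted; uninitialized ones were only
 * allocated and can be freed directly. */
static void release_fence(struct fence *f)
{
	if (f->ops) {
		if (--f->refcount == 0)	/* dma_fence_put() equivalent */
			free(f);
	} else {
		free(f);		/* kfree() the never-emitted fence */
	}
}

int main(void)
{
	struct fence *never_emitted = calloc(1, sizeof(*never_emitted));
	struct fence *emitted = calloc(1, sizeof(*emitted));

	if (!never_emitted || !emitted)
		return 1;
	emitted->ops = (void *)1;	/* pretend the fence was initialized */
	emitted->refcount = 1;

	release_fence(never_emitted);
	release_fence(emitted);
	printf("both fences released\n");
	return 0;
}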
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 4a6487eb6cb5..7abf069d17d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -64,7 +64,8 @@ struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct amdgpu_fence hw_fence;
+ struct amdgpu_fence *hw_fence;
+ struct amdgpu_fence *hw_vm_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6b7d66b6d4cc..63ee6ba6a931 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -371,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))
ring->sched.ready = true;
else
ring->sched.ready = false;
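The one-line fix above matters because BIT_ULL(1) << n evaluates to bit n + 1, so every ring was testing its neighbour's bit in the scheduler mask. A runnable demonstration of the off-by-one (the index values are arbitrary):

#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	unsigned int i = 1, j = 2, num_jpeg_rings = 4;
	unsigned int q = i * num_jpeg_rings + j;	/* queue index 6 */
	unsigned long long val = BIT_ULL(6);		/* user enabled queue 6 */

	/* buggy: BIT_ULL(1) << 6 == bit 7, so queue 6 appears disabled */
	printf("buggy: %d\n", !!(val & (BIT_ULL(1) << q)));
	/* fixed: tests exactly bit 6 */
	printf("fixed: %d\n", !!(val & BIT_ULL(q)));
	return 0;
}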
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a9327472c651..6ee77f431d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
+ ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
@@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
- mem.vram.heap_usage =
- ttm_resource_manager_usage(vram_man);
+ mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(vram_man) : 0;
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -1470,7 +1471,6 @@ error_pasid:
kfree(fpriv);
out_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_put:
pm_runtime_put_autosuspend(dev->dev);
@@ -1538,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 5bf9be073cdd..9c182ce501af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
spin_lock_init(&adev->mes.ring_lock[i]);
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
- adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+ adev->mes.vmid_mask_mmhub = 0xFF00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xFFFE : 0xFF00;
num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
@@ -409,7 +409,7 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
return -EINVAL;
/* Fill the doorbell array with invalid offsets before detection */
- memset(adev->mes.hung_queue_db_array_cpu_addr, 0,
+ memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
adev->mes.hung_queue_db_array_size * sizeof(u32));
input.queue_type = queue_type;
input.detect_only = detect_only;
@@ -420,12 +420,17 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to detect and reset\n");
} else {
*hung_db_num = 0;
- for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) {
+ for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
hung_db_array[i] = db_array[i];
*hung_db_num += 1;
}
}
+
+ /*
+ * TODO: for MES-scheduled user compute queue resets, also return the
+ * HQD info stored in hung_db_array, from the HQD info offset to the
+ * end of the array.
+ */
}
return r;
@@ -523,6 +528,18 @@ error:
return r;
}
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
+{
+ uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+
+ hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
+ hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
+ ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+ return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
+ ref_and_mask, ref_and_mask);
+}
+
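amdgpu_mes_hdp_flush() packages the HDP flush as a single write-then-poll handshake: write the ref value to the request register, then wait until the done register, masked, reads it back. A toy model of that handshake (the register indices and the instant "hardware" echo are fabricated for illustration):

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[2];

/* Write ref to the request register, then poll the done register until
 * (done & mask) == ref. Here the simulated hardware completes at once. */
static int reg_write_reg_wait(uint32_t reg0, uint32_t reg1,
			      uint32_t ref, uint32_t mask)
{
	regs[reg0] = ref;
	regs[reg1] = regs[reg0];	/* simulated HDP flush completion */
	return (regs[reg1] & mask) == ref ? 0 : -1;
}

int main(void)
{
	uint32_t ref_and_mask = 1u << 0;	/* ref_and_mask_cp0 stand-in */

	printf("hdp flush: %d\n",
	       reg_write_reg_wait(0, 1, ref_and_mask, ref_and_mask));
	return 0;
}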
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
@@ -686,14 +703,11 @@ out:
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
- bool is_supported = false;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
- mes_rev >= 0x63)
- is_supported = true;
- return is_supported;
+ return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
}
/* Fix me -- node_id will be used to identify the correct MES instance in the future */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 6b506fc72f58..e989225b354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -149,6 +149,7 @@ struct amdgpu_mes {
void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
int hung_queue_db_array_size;
+ int hung_queue_hqd_info_offset;
struct amdgpu_bo *hung_queue_db_array_gpu_obj;
uint64_t hung_queue_db_array_gpu_addr;
void *hung_queue_db_array_cpu_addr;
@@ -238,6 +239,7 @@ struct mes_add_queue_input {
struct mes_remove_queue_input {
uint32_t doorbell_offset;
uint64_t gang_context_addr;
+ bool remove_queue_after_reset;
};
struct mes_map_legacy_queue_input {
@@ -427,6 +429,7 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 20460cfd09bc..dc8d2f52c7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -326,6 +326,8 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
const struct drm_edid *bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 656b8a931dae..52c2d1731aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -96,6 +96,7 @@ struct amdgpu_bo_va {
* if non-zero, cannot unmap from GPU because user queues may still access it
*/
unsigned int queue_refcount;
+ atomic_t userq_va_mapped;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8c0e5d03de50..0b10497d487c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1539,6 +1539,7 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
uint64_t dst_node_id = node_info.node_id;
uint8_t dst_num_hops = node_info.num_hops;
+ uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
@@ -1558,13 +1559,20 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
continue;
mirror_top_info->nodes[j].num_hops = dst_num_hops;
- /*
- * prevent 0 num_links value re-reflection since reflection
+ mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
+ /* prevent 0 num_links value re-reflection since reflection
* criteria is based on num_hops (direct or indirect).
- *
*/
- if (dst_num_links)
+ if (dst_num_links) {
mirror_top_info->nodes[j].num_links = dst_num_links;
+ /* swap src and dst due to frame of reference */
+ for (int k = 0; k < dst_num_links; k++) {
+ mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
+ node_info.port_num[k].dst_xgmi_port_num;
+ mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
+ node_info.port_num[k].src_xgmi_port_num;
+ }
+ }
break;
}
@@ -1639,9 +1647,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
IP_VERSION(13, 0, 6) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
- IP_VERSION(13, 0, 14);
- bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
- psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
+ IP_VERSION(13, 0, 14) ||
+ amdgpu_sriov_vf(psp->adev);
+ bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
+ amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
/* populate the shared output buffer rather than the cmd input buffer
* with node_ids as the input for GET_PEER_LINKS command execution.
@@ -2355,8 +2364,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
- } else
+ } else {
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
return ret;
+ }
mutex_lock(&psp->securedisplay_context.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 123bcf5c2bb1..bacf888735db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
}
amdgpu_gfx_off_ctrl(adev, true);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e0ee21150860..2a6cf7963dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -41,6 +41,7 @@
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras_mgr.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -149,6 +150,8 @@ static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+static void
+amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
struct amdgpu_device *devs[MAX_GPU_INSTANCE];
int num_gpu;
@@ -611,6 +614,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
+
/**
* DOC: AMDGPU RAS debugfs EEPROM table reset interface
*
@@ -635,6 +640,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
(struct amdgpu_device *)file_inode(f)->i_private;
int ret;
+ if (amdgpu_uniras_enabled(adev)) {
+ ret = amdgpu_uniras_clear_badpages_info(adev);
+ return ret ? ret : size;
+ }
+
ret = amdgpu_ras_eeprom_reset_table(
&(amdgpu_ras_get_context(adev)->eeprom_control));
@@ -1542,9 +1552,51 @@ out_fini_err_data:
return ret;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
+{
+ struct ras_cmd_dev_handle req = {0};
+ int ret;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ &req, sizeof(req), NULL, 0);
+ if (ret) {
+ dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_cmd_block_ecc_info_req req = {0};
+ struct ras_cmd_block_ecc_info_rsp rsp = {0};
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ req.block_id = info->head.block;
+ req.subblock_id = info->head.sub_block_index;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ if (!ret) {
+ info->ce_count = rsp.ce_count;
+ info->ue_count = rsp.ue_count;
+ info->de_count = rsp.de_count;
+ }
+
+ return ret;
+}
+
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
- return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_query_block_ecc(adev, info);
+ else
+ return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
@@ -1596,6 +1648,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_cmd_inject_error_req inject_req;
+ struct ras_cmd_inject_error_rsp rsp;
+
+ if (!info)
+ return -EINVAL;
+
+ memset(&inject_req, 0, sizeof(inject_req));
+ inject_req.block_id = info->head.block;
+ inject_req.subblock_id = info->head.sub_block_index;
+ inject_req.address = info->address;
+ inject_req.error_type = info->head.type;
+ inject_req.instance_mask = info->instance_mask;
+ inject_req.method = info->value;
+
+ return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
+ &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -1613,6 +1686,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_error_inject(adev, info);
+
/* inject on guest isn't allowed, return success directly */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1757,7 +1833,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* sysfs begin */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count);
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
@@ -1815,19 +1893,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
unsigned int end = div64_ul(ppos + count - 1, element_size);
ssize_t s = 0;
struct ras_badpage *bps = NULL;
- unsigned int bps_count = 0;
+ int bps_count = 0, i, status;
+ uint64_t address;
memset(buf, 0, count);
- if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ bps_count = end - start;
+ bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return 0;
+
+ memset(bps, 0, sizeof(*bps) * bps_count);
+
+ if (amdgpu_uniras_enabled(adev))
+ bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
+ else
+ bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
+
+ if (bps_count <= 0) {
+ kfree(bps);
return 0;
+ }
+
+ for (i = 0; i < bps_count; i++) {
+ address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
+ if (amdgpu_ras_check_critical_address(adev, address))
+ continue;
+
+ bps[i].size = AMDGPU_GPU_PAGE_SIZE;
+
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
+ address);
+ if (status == -EBUSY)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (status == -ENOENT)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ else
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
- for (; start < end && start < bps_count; start++)
s += scnprintf(&buf[s], element_size + 1,
"0x%08x : 0x%08x : %1s\n",
- bps[start].bp,
- bps[start].size,
- amdgpu_ras_badpage_flags_str(bps[start].flags));
+ bps[i].bp,
+ bps[i].size,
+ amdgpu_ras_badpage_flags_str(bps[i].flags));
+ }
kfree(bps);
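Because every bad page prints as one fixed-width line, file offsets map linearly onto record indices: start is the record containing the first requested byte, end the record containing the last. Assuming the "0x%08x : 0x%08x : %1s\n" format works out to 28 bytes per record, the arithmetic looks like this small demo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t element_size = 28;	/* bytes per formatted record */
	uint64_t ppos = 100, count = 100;	/* read(2) offset and length */

	uint32_t start = ppos / element_size;			/* record 3 */
	uint32_t end = (ppos + count - 1) / element_size;	/* record 7 */

	printf("bytes [%llu, %llu) touch records %u..%u\n",
	       (unsigned long long)ppos,
	       (unsigned long long)(ppos + count), start, end);
	return 0;
}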
@@ -1843,12 +1952,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}
+static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
+ u32 *minor, u32 *rev)
+{
+ int i;
+
+ if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
+ return false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
+ *major = adev->ip_blocks[i].version->major;
+ *minor = adev->ip_blocks[i].version->minor;
+ *rev = adev->ip_blocks[i].version->rev;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct amdgpu_ras *con =
container_of(attr, struct amdgpu_ras, version_attr);
- return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
+ u32 major, minor, rev;
+ ssize_t size = 0;
+
+ size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
+ con->eeprom_control.tbl_hdr.version);
+
+ if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
+ size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
+ major, minor, rev);
+
+ return size;
}
static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
@@ -2241,6 +2380,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
return;
+ if (amdgpu_uniras_enabled(adev)) {
+ amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
+ return;
+ }
+
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
@@ -2411,6 +2555,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_ih_data *data;
+ if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info;
+
+ memset(&ih_info, 0, sizeof(ih_info));
+ ih_info.block = info->head.block;
+ memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
+
+ return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
+ }
+
obj = amdgpu_ras_find_obj(adev, &info->head);
if (!obj)
return -EINVAL;
@@ -2605,62 +2759,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
}
}
-/* recovery begin */
-
-/* return 0 on success.
- * caller need free bps.
- */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count)
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = 0;
- int ret = 0, status;
+ int r = 0;
+ uint32_t i;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data || data->count == 0) {
- *bps = NULL;
- ret = -EINVAL;
- goto out;
+ if (start < data->count) {
+ for (i = start; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
+ bps[r].bp = data->bps[i].retired_page;
+ r++;
+ if (r >= count)
+ break;
+ }
}
+ mutex_unlock(&con->recovery_lock);
- *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
- if (!*bps) {
- ret = -ENOMEM;
- goto out;
- }
+ return r;
+}
- for (; i < data->count; i++) {
- if (!data->bps[i].ts)
- continue;
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
+{
+ struct ras_cmd_bad_pages_info_req cmd_input;
+ struct ras_cmd_bad_pages_info_rsp *output;
+ uint32_t group, start_group, end_group;
+ uint32_t pos, pos_in_group;
+ int r = 0, i;
- (*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].retired_page,
- .size = AMDGPU_GPU_PAGE_SIZE,
- .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
- };
+ if (!bps || !count)
+ return -EINVAL;
- if (amdgpu_ras_check_critical_address(adev,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
+ output = kmalloc(sizeof(*output), GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
- status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
- if (status == -EBUSY)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (status == -ENOENT)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ memset(&cmd_input, 0, sizeof(cmd_input));
+
+ start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
+ RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+
+ pos = start;
+ for (group = start_group; group < end_group; group++) {
+ memset(output, 0, sizeof(*output));
+ cmd_input.group_index = group;
+ if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
+ &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
+ goto out;
+
+ if (pos >= output->bp_total_cnt)
+ goto out;
+
+ pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
+ if (!output->records[i].ts)
+ continue;
+
+ bps[r].bp = output->records[i].retired_page;
+ r++;
+ if (r >= count)
+ goto out;
+ }
}
- *count = con->bad_page_num;
out:
- mutex_unlock(&con->recovery_lock);
- return ret;
+ kfree(output);
+ return r;
}
static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
@@ -2748,8 +2923,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
- amdgpu_ras_query_err_status(remote_adev);
- amdgpu_ras_log_on_err_counter(remote_adev, type);
+ if (amdgpu_uniras_enabled(remote_adev)) {
+ amdgpu_ras_mgr_update_ras_ecc(remote_adev);
+ } else {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev, type);
+ }
}
}
@@ -2837,8 +3016,13 @@ static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
addr_in.ma.err_addr = bps->address;
addr_in.ma.socket_id = socket;
addr_in.ma.ch_inst = bps->mem_channel;
- /* tell RAS TA the node instance is not used */
- addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ } else {
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = bps->cu;
+ }
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
@@ -2981,8 +3165,16 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
int i = 0;
enum amdgpu_memory_partition save_nps;
- save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
- bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ } else {
+ /* If the PMFW manages the EEPROM, save_nps is not stored on the
+ * EEPROM; always convert the MCA address into a physical address
+ * by making save_nps different from nps.
+ */
+ save_nps = nps + 1;
+ }
if (save_nps == nps) {
if (amdgpu_umc_pages_in_a_row(adev, err_data,
@@ -3048,7 +3240,8 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
if (from_rom) {
/* there is no pa recs in V3, so skip pa recs processing */
- if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
for (i = 0; i < pages; i++) {
if (control->ras_num_recs - i >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
@@ -3118,7 +3311,13 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ unit_num = control->ras_num_recs -
+ control->ras_num_recs_old;
+ else
+ unit_num = data->count / adev->umc.retire_unit -
+ control->ras_num_recs;
+
save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
@@ -3126,7 +3325,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
*new_cnt = unit_num;
/* only new entries are saved */
- if (unit_num > 0) {
+ if (unit_num && save_count) {
/*old asics only save pa to eeprom like before*/
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
@@ -3179,7 +3378,8 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
/* In V3, there are no pa recs, and some cases (when address == 0) may be
parsed as pa recs, so add a version check to avoid it.
*/
- if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
for (i = 0; i < control->ras_num_recs; i++) {
if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
@@ -3590,7 +3790,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!con || amdgpu_sriov_vf(adev))
return 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
control = &con->eeprom_control;
+ con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
+
ret = amdgpu_ras_eeprom_init(control);
control->is_eeprom_valid = !ret;
@@ -3751,7 +3956,9 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
mutex_unlock(&con->recovery_lock);
amdgpu_ras_critical_region_init(adev);
-
+#ifdef CONFIG_X86_MCE_AMD
+ amdgpu_unregister_bad_pages_mca_notifier(adev);
+#endif
return 0;
}
/* recovery end */
@@ -3975,7 +4182,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
atomic_set(&con->ras_ue_count, ue_count);
}
- pm_runtime_mark_last_busy(dev->dev);
Out:
pm_runtime_put_autosuspend(dev->dev);
}
@@ -4584,6 +4790,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
struct ras_event_state *event_state;
int ret = 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
if (type >= RAS_EVENT_TYPE_COUNT) {
ret = -EINVAL;
goto out;
@@ -4634,20 +4843,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type
return id;
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
- u64 event_id;
+ u64 event_id = RAS_EVENT_INVALID_ID;
- if (amdgpu_ras_mark_ras_event(adev, type)) {
- dev_err(adev->dev,
- "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
- return;
- }
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
- event_id = amdgpu_ras_acquire_event_id(adev, type);
+ if (!amdgpu_ras_mark_ras_event(adev, type))
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
@@ -4656,6 +4863,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
+
+ return -EBUSY;
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
@@ -4783,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
notifier_registered = true;
}
}
+static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ if (!notifier_registered && !mce_adev_list.num_gpu)
+ return;
+ for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
+ if (mce_adev_list.devs[i] == adev)
+ mce_adev_list.devs[i] = NULL;
+ if (!mce_adev_list.devs[i])
+ ++j;
+ }
+
+ if (j == mce_adev_list.num_gpu) {
+ mce_adev_list.num_gpu = 0;
+ /* Unregister x86 notifier with MCE subsystem. */
+ if (notifier_registered) {
+ mce_unregister_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = false;
+ }
+ }
+}
#endif
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
@@ -5408,6 +5639,9 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_is_rma(adev);
+
if (!con)
return false;
@@ -5490,3 +5724,25 @@ bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr
return ret;
}
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_pre_reset(tmp_adev);
+ }
+}
+
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_post_reset(tmp_adev);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6cf0dfd38be8..ff44190d7d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -503,7 +503,34 @@ struct ras_critical_region {
uint64_t size;
};
+struct ras_eeprom_table_version {
+ uint32_t minor : 16;
+ uint32_t major : 16;
+};
+
+struct ras_eeprom_smu_funcs {
+ int (*get_ras_table_version)(struct amdgpu_device *adev,
+ uint32_t *table_version);
+ int (*get_badpage_count)(struct amdgpu_device *adev, uint32_t *count, uint32_t timeout);
+ int (*get_badpage_mca_addr)(struct amdgpu_device *adev, uint16_t index, uint64_t *mca_addr);
+ int (*set_timestamp)(struct amdgpu_device *adev, uint64_t timestamp);
+ int (*get_timestamp)(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+ int (*get_badpage_ipid)(struct amdgpu_device *adev, uint16_t index, uint64_t *ipid);
+ int (*erase_ras_table)(struct amdgpu_device *adev, uint32_t *result);
+};
+
+enum ras_smu_feature_flags {
+ RAS_SMU_FEATURE_BIT__RAS_EEPROM = BIT_ULL(0),
+};
+
+struct ras_smu_drv {
+ const struct ras_eeprom_smu_funcs *smu_eeprom_funcs;
+ void (*ras_smu_feature_flags)(struct amdgpu_device *adev, uint64_t *flags);
+};
+
struct amdgpu_ras {
+ void *ras_mgr;
/* ras infrastructure */
/* for ras itself. */
uint32_t features;
@@ -590,6 +617,10 @@ struct amdgpu_ras {
/* Protect poison injection */
struct mutex poison_lock;
+
+ /* Disable/Enable uniras switch */
+ bool uniras_enabled;
+ const struct ras_smu_drv *ras_smu_drv;
};
struct ras_fs_data {
@@ -909,7 +940,7 @@ static inline void amdgpu_ras_intr_cleared(void)
atomic_set(&amdgpu_ras_in_intr, 0);
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
@@ -1008,4 +1039,9 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 3eb3fb55ccb0..64dd7a81bff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include "amdgpu_reset.h"
+#include "amdgpu_ras_mgr.h"
/* These are memory addresses as would be seen by one or more EEPROM
* chips strung on the I2C bus, usually by manipulating pins 1-3 of a
@@ -123,6 +124,8 @@
RAS_TABLE_V2_1_INFO_SIZE) \
/ RAS_TABLE_RECORD_SIZE)
+#define RAS_SMU_MESSAGE_TIMEOUT_MS 1000 /* 1s */
+
/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
* offset off of RAS_TABLE_START. That is, this is something you can
* add to control->i2c_address, and then tell I2C layer to read
@@ -443,40 +446,57 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u32 erase_res = 0;
u8 csum;
int res;
mutex_lock(&control->ras_tbl_mutex);
- hdr->header = RAS_TABLE_HDR_VAL;
- amdgpu_ras_set_eeprom_table_version(control);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ hdr->header = RAS_TABLE_HDR_VAL;
+ amdgpu_ras_set_eeprom_table_version(control);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
- hdr->first_rec_offset = RAS_RECORD_START_V2_1;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
- RAS_TABLE_V2_1_INFO_SIZE;
- rai->rma_status = GPU_HEALTH_USABLE;
- /**
- * GPU health represented as a percentage.
- * 0 means worst health, 100 means fully health.
- */
- rai->health_percent = 100;
- /* ecc_page_threshold = 0 means disable bad page retirement */
- rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = GPU_HEALTH_USABLE;
+
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ /**
+ * GPU health represented as a percentage.
+ * 0 means worst health, 100 means fully healthy.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
} else {
- hdr->first_rec_offset = RAS_RECORD_START;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ res = amdgpu_ras_smu_erase_ras_table(adev, &erase_res);
+ if (res || erase_res) {
+ dev_warn(adev->dev, "RAS EEPROM reset failed, res:%d result:%d",
+ res, erase_res);
+ if (!res)
+ res = -EIO;
+ }
}
- csum = __calc_hdr_byte_sum(control);
- if (hdr->version >= RAS_TABLE_VER_V2_1)
- csum += __calc_ras_info_byte_sum(control);
- csum = -csum;
- hdr->checksum = csum;
- res = __write_table_header(control);
- if (!res && hdr->version > RAS_TABLE_VER_V1)
- res = __write_table_ras_info(control);
-
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
control->ras_num_mca_recs = 0;
@@ -556,6 +576,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev);
+
if (!__is_ras_eeprom_supported(adev) ||
!amdgpu_bad_page_threshold)
return false;
@@ -766,7 +789,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ if (adev->cper.enabled && !amdgpu_uniras_enabled(adev) &&
+ amdgpu_cper_generate_bp_threshold_record(adev))
dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
if ((amdgpu_bad_page_threshold != -1) &&
@@ -849,6 +873,71 @@ Out:
return res;
}
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int ret, retry = 20;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_recs_old = control->ras_num_recs;
+
+ do {
+ /* 1000ms timeout is long enough, smu_get_badpage_count won't
+ * return -EBUSY before timeout.
+ */
+ ret = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), RAS_SMU_MESSAGE_TIMEOUT_MS);
+ if (!ret &&
+ (control->ras_num_recs_old == control->ras_num_recs)) {
+ /* The record count update in the PMFW takes some time;
+ * smu_get_badpage_count may return before the count is
+ * updated, so sleep for a while and retry.
+ */
+ msleep(50);
+ retry--;
+ } else {
+ break;
+ }
+ } while (retry);
+
+ /* An unchanged record count is not a real failure,
+ * so don't print a warning here.
+ */
+ if (!ret && (control->ras_num_recs_old == control->ras_num_recs))
+ ret = -EINVAL;
+
+ return ret;
+}
+
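The update helper above polls until the PMFW's record count actually changes, bounded by 20 retries of 50 ms. A self-contained sketch of that poll-until-changed loop (the fake firmware here reports the new count on the third poll, and -1 stands in for -EINVAL):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

/* Stand-in for the SMU query; the count "arrives" on the 3rd poll. */
static int fake_get_badpage_count(uint32_t *count)
{
	static int polls;

	*count = (++polls >= 3) ? 42 : 40;
	return 0;
}

int main(void)
{
	uint32_t old_count = 40, count = 40;
	int ret, retry = 20;

	do {
		ret = fake_get_badpage_count(&count);
		if (!ret && count == old_count) {
			usleep(50 * 1000);	/* msleep(50) equivalent */
			retry--;
		} else {
			break;
		}
	} while (retry);

	if (!ret && count == old_count)
		ret = -1;	/* no update observed */
	printf("ret=%d count=%u retries left=%d\n", ret, count, retry);
	return 0;
}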
+static int amdgpu_ras_smu_eeprom_append(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev) || !con)
+ return 0;
+
+ control->ras_num_bad_pages = con->bad_page_num;
+
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->ras_num_bad_pages > con->bad_page_cnt_threshold) {
+ dev_warn(adev->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ control->ras_num_bad_pages, con->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2))
+ con->is_rma = true;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table
* @control: pointer to control structure
@@ -873,6 +962,9 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
if (!__is_ras_eeprom_supported(adev))
return 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_append(control);
+
if (num == 0) {
dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
@@ -948,6 +1040,50 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return res;
}
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint64_t ts, end_idx;
+ int i, ret;
+ u64 mca, ipid;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ if (!adev->umc.ras || !adev->umc.ras->mca_ipid_parse)
+ return -EOPNOTSUPP;
+
+ end_idx = rec_idx + num;
+ for (i = rec_idx; i < end_idx; i++) {
+ ret = amdgpu_ras_smu_get_badpage_mca_addr(adev, i, &mca);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_badpage_ipid(adev, i, &ipid);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_timestamp(adev, i, &ts);
+ if (ret)
+ return ret;
+
+ record[i - rec_idx].address = mca;
+ /* retired_page (pa) is unused now */
+ record[i - rec_idx].retired_page = 0x1ULL;
+ record[i - rec_idx].ts = ts;
+ record[i - rec_idx].err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+ adev->umc.ras->mca_ipid_parse(adev, ipid,
+ (uint32_t *)&(record[i - rec_idx].cu),
+ (uint32_t *)&(record[i - rec_idx].mem_channel),
+ (uint32_t *)&(record[i - rec_idx].mcumc_id), NULL);
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_read -- read EEPROM
* @control: pointer to control structure
@@ -969,6 +1105,9 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
u8 *buf, *pp;
u32 g0, g1;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_eeprom_read_idx(control, record, 0, num);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1140,6 +1279,10 @@ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf,
int res = -EFAULT;
size_t data_len;
+ /* pmfw manages eeprom data by itself */
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
mutex_lock(&control->ras_tbl_mutex);
/* We want *pos - data_len > 0, which means there's
@@ -1370,6 +1513,42 @@ Out:
return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
}
+static int amdgpu_ras_smu_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t local_time;
+ int res;
+
+ ras->is_rma = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+ mutex_init(&control->ras_tbl_mutex);
+
+ res = amdgpu_ras_smu_get_table_version(adev, &(hdr->version));
+ if (res)
+ return res;
+
+ res = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), 100);
+ if (res)
+ return res;
+
+ local_time = (uint64_t)ktime_get_real_seconds();
+ res = amdgpu_ras_smu_set_timestamp(adev, local_time);
+ if (res)
+ return res;
+
+ control->ras_max_record_count = 4000;
+
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+
+ return 0;
+}
+
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1378,6 +1557,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_init(control);
+
ras->is_rma = false;
if (!__is_ras_eeprom_supported(adev))
@@ -1444,6 +1626,47 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return 0;
}
+static int amdgpu_ras_smu_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_bad_pages = ras->bad_page_num;
+
+ if ((ras->bad_page_cnt_threshold < control->ras_num_bad_pages) &&
+ amdgpu_bad_page_threshold != 0) {
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ } else {
+ ras->is_rma = true;
+ dev_warn(adev->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
+ }
+
+ return 0;
+ }
+
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+ control->ras_num_bad_pages,
+ ras->bad_page_cnt_threshold);
+ return 0;
+}
+
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1451,6 +1674,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res = 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_check(control);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1541,7 +1767,8 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
struct amdgpu_ras_eeprom_control *control;
int res;
- if (!__is_ras_eeprom_supported(adev) || !ras)
+ if (!__is_ras_eeprom_supported(adev) || !ras ||
+ amdgpu_ras_smu_eeprom_supported(adev))
return;
control = &ras->eeprom_control;
if (!control->is_eeprom_valid)
@@ -1561,4 +1788,143 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
control->is_eeprom_valid = false;
}
return;
-}
\ No newline at end of file
+}
+
+static const struct ras_smu_drv *amdgpu_ras_get_smu_ras_drv(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!ras)
+ return NULL;
+
+ return ras->ras_smu_drv;
+}
+
+static uint64_t amdgpu_ras_smu_get_feature_flags(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *ras_smu_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!ras_smu_drv)
+ goto out;
+
+ if (ras_smu_drv->ras_smu_feature_flags)
+ ras_smu_drv->ras_smu_feature_flags(adev, &flags);
+
+out:
+ return flags;
+}
+
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!__is_ras_eeprom_supported(adev) || !smu_ras_drv)
+ return false;
+
+ if (!smu_ras_drv->smu_eeprom_funcs)
+ return false;
+
+ flags = amdgpu_ras_smu_get_feature_flags(adev);
+
+ return !!(flags & RAS_SMU_FEATURE_BIT__RAS_EEPROM);
+}
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_ras_table_version)
+ return smu_ras_drv->smu_eeprom_funcs->get_ras_table_version(adev,
+ table_version);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_count)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_count(adev,
+ count, timeout);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr(adev,
+ index, mca_addr);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->set_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->set_timestamp(adev,
+ timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->get_timestamp(adev,
+ index, timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid(adev,
+ index, ipid);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->erase_ras_table)
+ return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev,
+ result);
+ return -EOPNOTSUPP;
+}
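All seven wrappers above follow one pattern: gate on the SMU EEPROM feature flag, then NULL-check the specific callback, returning -EOPNOTSUPP when either is missing so callers need no feature knowledge of their own. Reduced to its skeleton (types and names invented for the demo):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

struct eeprom_ops {
	int (*get_count)(uint32_t *count);	/* optional callback */
};

/* Feature gate first, then NULL-check the individual callback,
 * falling back to -EOPNOTSUPP at every level. */
static int get_badpage_count(const struct eeprom_ops *ops, uint32_t *count)
{
	if (!ops)		/* "feature not supported" path */
		return -EOPNOTSUPP;
	if (ops->get_count)
		return ops->get_count(count);
	return -EOPNOTSUPP;
}

static int fw_get_count(uint32_t *count)
{
	*count = 7;
	return 0;
}

int main(void)
{
	const struct eeprom_ops ops = { .get_count = fw_get_count };
	uint32_t count = 0;

	printf("ret=%d count=%u\n", get_badpage_count(&ops, &count), count);
	printf("no ops: ret=%d\n", get_badpage_count(NULL, &count));
	return 0;
}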
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ebfca4cb5688..2e5d63957e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -82,6 +82,7 @@ struct amdgpu_ras_eeprom_control {
/* Number of records in the table.
*/
u32 ras_num_recs;
+ u32 ras_num_recs_old;
/* the bad page number is ras_num_recs or
* ras_num_recs * umc.retire_unit
@@ -163,6 +164,35 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev);
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version);
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout);
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr);
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp);
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid);
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result);
+
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num);
+
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 8f6ce948c684..c596b6df2e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#include "amdgpu_ras_mgr.h"
#include "atom.h"
/*
@@ -159,8 +160,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->funcs->align_mask)
- ib->ptr[ib->length_dw++] = ring->funcs->nop;
+ u32 align_mask = ring->funcs->align_mask;
+ u32 count = ib->length_dw & align_mask;
+
+ if (count) {
+ count = align_mask + 1 - count;
+
+ memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);
+
+ ib->length_dw += count;
+ }
}
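The rewritten padder computes the NOP count up front and writes it with one memset32() instead of a dword-at-a-time loop. The alignment arithmetic, assuming an 8-dword ring alignment and an invented NOP encoding:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ib[64];
	uint32_t align_mask = 0x7;	/* ring requires 8-dword alignment */
	uint32_t nop = 0x80000000;	/* hypothetical NOP packet */
	uint32_t length_dw = 13;
	uint32_t count, i;

	count = length_dw & align_mask;		/* 13 & 7 = 5 */
	if (count) {
		count = align_mask + 1 - count;	/* 8 - 5 = 3 NOPs needed */
		for (i = 0; i < count; i++)	/* memset32() equivalent */
			ib[length_dw + i] = nop;
		length_dw += count;
	}
	printf("padded length: %u dwords\n", length_dw);	/* 16 */
	return 0;
}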
/**
@@ -460,9 +469,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
ktime_t deadline;
bool ret;
- if (unlikely(ring->adev->debug_disable_soft_recovery))
- return false;
-
deadline = ktime_add_us(ktime_get(), 10000);
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
@@ -490,6 +496,66 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
*/
#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ const uint8_t ring_header_size = 12;
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL);
+ struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL);
+ struct ras_cmd_cper_record_req *record_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL);
+ struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL);
+ uint8_t *ring_header __free(kfree) =
+ kzalloc(ring_header_size, GFP_KERNEL);
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ int r;
+
+ if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp ||
+ !ring_header)
+ return -ENOMEM;
+
+ if (!(*offset)) {
+ /* Need at least 12 bytes for the header on the first read */
+ if (size < ring_header_size)
+ return -EINVAL;
+
+ if (copy_to_user(buf, ring_header, ring_header_size))
+ return -EFAULT;
+ buf += ring_header_size;
+ size -= ring_header_size;
+ }
+
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req),
+ snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp));
+ if (r || !snapshot_rsp->total_cper_num)
+ return r;
+
+ start_cper_id = snapshot_rsp->start_cper_id;
+ total_cper_num = snapshot_rsp->total_cper_num;
+
+ record_req->buf_ptr = (uint64_t)(uintptr_t)buf;
+ record_req->buf_size = size;
+ record_req->cper_start_id = start_cper_id + *offset;
+ record_req->cper_num = total_cper_num;
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD,
+ record_req, sizeof(struct ras_cmd_cper_record_req),
+ record_rsp, sizeof(struct ras_cmd_cper_record_rsp));
+ if (r)
+ return r;
+
+ r = *offset ? record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size;
+ (*offset) += record_rsp->real_cper_num;
+
+ return r;
+}
+
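The CPER read path above leans on the kernel's scope-based cleanup: each __free(kfree) annotation frees its buffer automatically on any return path. A userspace analogue using the same GCC/Clang cleanup attribute (the macro name here is made up):

#include <stdio.h>
#include <stdlib.h>

/* The compiler runs the cleanup handler when the variable goes out of
 * scope, so every early return frees all allocations. free(NULL) is safe. */
static void free_ptr(void *p)
{
	free(*(void **)p);
}
#define __free_on_exit __attribute__((cleanup(free_ptr)))

int main(void)
{
	char *req __free_on_exit = calloc(1, 64);
	char *rsp __free_on_exit = calloc(1, 64);

	if (!req || !rsp)
		return 1;	/* both buffers still freed on this path */

	printf("buffers allocated and auto-freed\n");
	return 0;
}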
/* Layout of file is 12 bytes consisting of
* - rptr
* - wptr
@@ -506,6 +572,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
loff_t i;
int r;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev))
+ return amdgpu_ras_cper_debugfs_read(f, buf, size, pos);
+
if (*pos & 3 || size & 3)
return -EINVAL;
@@ -811,7 +880,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
if (r)
return r;
- /* signal the fence of the bad job */
+ /* signal the guilty fence and set an error on all fences from the context */
if (guilty_fence)
amdgpu_fence_driver_guilty_force_completion(guilty_fence);
/* Re-emit the non-guilty commands */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index b6b649179776..7a27c6c4bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -83,6 +83,7 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_MES,
AMDGPU_RING_TYPE_UMSCH_MM,
AMDGPU_RING_TYPE_CPER,
+ AMDGPU_RING_TYPE_MAX,
};
enum amdgpu_ib_pool_type {
@@ -147,16 +148,14 @@ struct amdgpu_fence {
u64 wptr;
/* fence context for resets */
u64 context;
- uint32_t seq;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
-void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
-void amdgpu_fence_save_wptr(struct dma_fence *fence);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -166,8 +165,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
- struct amdgpu_fence *af, unsigned int flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 41ebe690eeff..3739be1b71e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
dev_err(adev->dev, "Invalid input: %s\n", str);
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index aa9ee5dffa45..2b931e855abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -188,7 +188,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
- unsigned int i;
int r;
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
@@ -255,16 +254,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
} else {
- dma_addr_t dma_address;
-
- dma_address = mm_cur->start;
- dma_address += adev->vm_manager.vram_base_offset;
+ u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
- for (i = 0; i < num_pages; ++i) {
- amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
- flags, cpu_addr);
- dma_address += PAGE_SIZE;
- }
+ amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
}
dma_fence_put(amdgpu_job_submit(job));
@@ -286,12 +278,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
* move and different for a BO to BO copy.
*
*/
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f)
+__attribute__((nonnull))
+static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
@@ -365,9 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
- if (f)
- *f = dma_fence_get(fence);
- dma_fence_put(fence);
+ *f = fence;
return r;
}
@@ -706,10 +697,11 @@ struct amdgpu_ttm_tt {
* memory and start HMM tracking CPU page table update
*
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that the range points to valid memory and that it is freed afterwards.
*/
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -719,9 +711,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
bool readonly;
int r = 0;
- /* Make sure get_user_pages_done() can cleanup gracefully */
- *range = NULL;
-
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
@@ -756,38 +745,6 @@ out_unlock:
return r;
}
-/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
- */
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- if (gtt && gtt->userptr && range)
- amdgpu_hmm_range_get_pages_done(range);
-}
-
-/*
- * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
- * Check if the pages backing this ttm range have been invalidated
- *
- * Returns: true if pages are still valid
- */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-
- if (!gtt || !gtt->userptr || !range)
- return false;
-
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
- gtt->userptr, ttm->num_pages);
-
- WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-
- return !amdgpu_hmm_range_get_pages_done(range);
-}
#endif
/*
@@ -797,12 +754,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}
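
Editor's note: the hmm_range handling now goes through a driver-side wrapper. Judging only by the accesses visible in this patch (range->hmm_range.hmm_pfns here, plus range->bo and amdgpu_hmm_range_alloc()/amdgpu_hmm_range_free() in amdgpu_userq.c below), the wrapper plausibly looks like the following sketch; the real definition lives in amdgpu_hmm.h and may carry more fields:

struct amdgpu_hmm_range {
	struct hmm_range hmm_range; /* core HMM tracking state */
	struct amdgpu_bo *bo;       /* BO whose user pages are tracked */
};
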
/*
@@ -1372,7 +1329,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching == ttm_cached)
+ if (ttm && ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
@@ -1529,6 +1486,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
if (r)
goto out;
+ mutex_lock(&adev->mman.gtt_window_lock);
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
src_mm.start;
@@ -1543,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
WARN_ON(job->ibs[0].length_dw > num_dw);
fence = amdgpu_job_submit(job);
+ mutex_unlock(&adev->mman.gtt_window_lock);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
@@ -1804,18 +1763,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
}
- if (!adev->gmc.is_app_apu) {
- ret = amdgpu_bo_create_kernel_at(
- adev, adev->gmc.real_vram_size - reserve_size,
- reserve_size, &adev->mman.fw_reserved_memory, NULL);
- if (ret) {
- dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
- NULL, NULL);
- return ret;
- }
- } else {
- DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
+ &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ return ret;
}
return 0;
@@ -1837,7 +1792,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
adev->gmc.mem_partitions[i].numa.node,
- false, false);
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
}
return 0;
}
@@ -1930,8 +1885,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
- adev->need_swiotlb,
- dma_addressing_limited(adev->dev));
+ (adev->need_swiotlb ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+ (dma_addressing_limited(adev->dev) ?
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
if (r) {
dev_err(adev->dev,
"failed initializing buffer object driver(%d).\n", r);
@@ -1980,19 +1938,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- *The reserved vram for driver must be pinned to the specified
- *place on the VRAM, so reserve it early.
+ * The reserved VRAM for the driver must be pinned to a specific
+ * location in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
- * only NAVI10 and onwards ASIC support for IP discovery.
- * If IP discovery enabled, a block of memory should be
- * reserved for IP discovey.
+	 * Only NAVI10 and later ASICs support IP discovery.
+ * If IP discovery is enabled, a block of memory should be
+ * reserved for it.
*/
- if (adev->mman.discovery_bin) {
+ if (adev->discovery.reserve_tmr) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
@@ -2229,8 +2187,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
} else {
drm_sched_entity_destroy(&adev->mman.high_pr);
drm_sched_entity_destroy(&adev->mman.low_pr);
- dma_fence_put(man->move);
- man->move = NULL;
+ /* Drop all the old fences since re-creating the scheduler entities
+ * will allocate new contexts.
+ */
+ ttm_resource_manager_cleanup(man);
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0be2728aa872..577ee04ce0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -28,6 +28,7 @@
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -82,9 +83,6 @@ struct amdgpu_mman {
uint64_t stolen_reserved_offset;
uint64_t stolen_reserved_size;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
struct amdgpu_bo *fw_reserved_memory_extend;
@@ -170,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, uint32_t copy_flags);
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f);
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
@@ -192,29 +184,16 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range);
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range);
+ struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
return -EPERM;
}
-static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
-}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- return false;
-}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 2e039fb778ea..3f0b0e9af4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -24,6 +24,7 @@
#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
+#include "amdgpu_ras_mgr.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
#define MAX_UMC_HASH_STRING_SIZE 256
@@ -96,67 +97,96 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = &con->eeprom_control;
unsigned int error_query_mode;
int ret = 0;
unsigned long err_count;
amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+	 * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
mutex_lock(&con->page_retirement_lock);
- ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
- if (ret == -EOPNOTSUPP &&
- error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+				 * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev,
+ ras_error_status);
+ }
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+				 * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ecc_info_query_ras_error_address(adev,
+ ras_error_status);
+ }
}
- } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
- (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_count)
- adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
+ } else {
+ if (!amdgpu_ras_eeprom_update_record_num(control)) {
+ err_data->err_addr_cnt = err_data->de_count =
+ control->ras_num_recs - control->ras_num_recs_old;
+ amdgpu_ras_eeprom_read_idx(control, err_data->err_addr,
+ control->ras_num_recs_old, err_data->de_count);
}
}
@@ -166,7 +196,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt, false);
+ err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev));
amdgpu_ras_save_bad_pages(adev, &err_count);
amdgpu_dpm_send_hbm_bad_pages_num(adev,
@@ -244,6 +274,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
}
amdgpu_ras_error_data_fini(&err_data);
+ } else if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info = {0};
+
+ ih_info.block = block;
+ ih_info.pasid = pasid;
+ ih_info.reset = reset;
+ ih_info.pasid_fn = pasid_fn;
+ ih_info.data = data;
+ amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info);
} else {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ec203f9e5ffa..28dff750c47e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -113,6 +113,8 @@ struct amdgpu_umc_ras {
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
void (*get_retire_flip_bits)(struct amdgpu_device *adev);
+ void (*mca_ipid_parse)(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid);
};
struct amdgpu_umc_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 1add21160d21..9a969175900e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -25,10 +25,13 @@
#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
+#include "amdgpu_reset.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
+#include "amdgpu_hmm.h"
#include "amdgpu_userq_fence.h"
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
@@ -44,10 +47,130 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
-int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
- u64 expected_size)
+static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
+ enum amdgpu_ring_type ring_type, int reset_type)
+{
+ if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
+ return false;
+
+ switch (ring_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
+{
+ if (amdgpu_device_should_recover_gpu(adev)) {
+ amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->userq_reset_work);
+ /* Wait for the reset job to complete */
+ flush_work(&adev->userq_reset_work);
+ }
+}
+
+static int
+amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const int queue_types[] = {
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_SDMA
+ };
+ const int num_queue_types = ARRAY_SIZE(queue_types);
+ bool gpu_reset = false;
+ int r = 0;
+ int i;
+
+ /* Warning if current process mutex is not held */
+ WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
+
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "userq reset disabled by debug mask\n");
+ return 0;
+ }
+
+ /*
+ * If GPU recovery feature is disabled system-wide,
+ * skip all reset detection logic
+ */
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
+ /*
+	 * Iterate through all queue types to detect and reset problematic queues.
+	 * Process each queue type in the defined order.
+ */
+ for (i = 0; i < num_queue_types; i++) {
+ int ring_type = queue_types[i];
+ const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
+
+ if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
+ continue;
+
+ if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
+ funcs && funcs->detect_and_reset) {
+ r = funcs->detect_and_reset(adev, ring_type);
+ if (r) {
+ gpu_reset = true;
+ break;
+ }
+ }
+ }
+
+ if (gpu_reset)
+ amdgpu_userq_gpu_reset(adev);
+
+ return r;
+}
+
+static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
+ struct amdgpu_bo_va_mapping *va_map, u64 addr)
+{
+ struct amdgpu_userq_va_cursor *va_cursor;
+
+ va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+ if (!va_cursor)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&va_cursor->list);
+ va_cursor->gpu_addr = addr;
+ atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+ list_add(&va_cursor->list, &queue->userq_va_list);
+
+ return 0;
+}
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size)
{
struct amdgpu_bo_va_mapping *va_map;
+ struct amdgpu_vm *vm = queue->vm;
u64 user_addr;
u64 size;
int r = 0;
@@ -67,6 +190,7 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
/* Only validate the userq whether resident in the VM mapping range */
if (user_addr >= va_map->start &&
va_map->last - user_addr + 1 >= size) {
+ amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
amdgpu_bo_unreserve(vm->root.bo);
return 0;
}
@@ -77,6 +201,76 @@ out_err:
return r;
}
+static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ bool r;
+
+ if (amdgpu_bo_reserve(vm->root.bo, false))
+ return false;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
+ r = true;
+ else
+ r = false;
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ return r;
+}
+
+static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ int r = 0;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
+ dev_dbg(queue->userq_mgr->adev->dev,
+ "validate the userq mapping:%p va:%llx r:%d\n",
+ queue, va_cursor->gpu_addr, r);
+ }
+
+ if (r != 0)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_userq_va_cursor *va_cursor)
+{
+ atomic_set(&mapping->bo_va->userq_va_mapped, 0);
+ list_del(&va_cursor->list);
+ kfree(va_cursor);
+}
+
+static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ struct amdgpu_bo_va_mapping *mapping;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
+ if (!mapping) {
+ r = -EINVAL;
+ goto err;
+ }
+ dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
+ queue, va_cursor->gpu_addr);
+ amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
+ }
+err:
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ return r;
+}
+
static int
amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
@@ -84,17 +278,22 @@ amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *userq_funcs =
adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
r = userq_funcs->preempt(uq_mgr, queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
} else {
queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
}
}
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
return r;
}
@@ -126,16 +325,23 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *userq_funcs =
adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
int r = 0;
if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
r = userq_funcs->unmap(uq_mgr, queue);
- if (r)
+ if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
- else
+ found_hung_queue = true;
+ } else {
queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
}
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
return r;
}
@@ -152,26 +358,33 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
r = userq_funcs->map(uq_mgr, queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
} else {
queue->state = AMDGPU_USERQ_STATE_MAPPED;
}
}
+
return r;
}
-static void
+static int
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct dma_fence *f = queue->last_fence;
- int ret;
+ int ret = 0;
if (f && !dma_fence_is_signaled(f)) {
- ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
- if (ret <= 0)
+ ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0) {
drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
f->context, f->seqno);
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ return -ETIME;
+ }
}
+
+ return ret;
}
static void
@@ -182,16 +395,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ /* Drop the userq reference. */
+ amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
amdgpu_userq_fence_driver_free(queue);
- idr_remove(&uq_mgr->userq_idr, queue_id);
+ /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
+ queue->userq_mgr = NULL;
+ list_del(&queue->userq_va_list);
kfree(queue);
+
+ up_read(&adev->reset_domain->sem);
}
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
- return idr_find(&uq_mgr->userq_idr, qid);
+ return xa_load(&uq_mgr->userq_mgr_xa, qid);
}
void
@@ -319,17 +543,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
case AMDGPU_HW_IP_DMA:
db_size = sizeof(u64);
break;
-
- case AMDGPU_HW_IP_VCN_ENC:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
- break;
-
- case AMDGPU_HW_IP_VPE:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
- break;
-
default:
drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
db_info->queue_type);
@@ -378,10 +591,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
-
+ atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
#if defined(CONFIG_DEBUG_FS)
debugfs_remove_recursive(queue->debugfs_queue);
#endif
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
/*TODO: It requires a reset for userq hw unmap error*/
if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
@@ -391,7 +605,6 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -463,8 +676,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
struct amdgpu_db_info db_info;
char *queue_name;
bool skip_map_queue;
+ u32 qid;
uint64_t index;
- int qid, r = 0;
+ int r = 0;
int priority =
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
@@ -487,7 +701,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
- mutex_lock(&adev->userq_mutex);
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
@@ -505,14 +718,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
- /* Validate the userq virtual address.*/
- if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
- amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
- amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
- r = -EINVAL;
- kfree(queue);
- goto unlock;
- }
+ INIT_LIST_HEAD(&queue->userq_va_list);
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@@ -523,6 +729,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
db_info.db_obj = &queue->db_obj;
db_info.doorbell_offset = args->in.doorbell_offset;
+ /* Validate the userq virtual address.*/
+ if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
@@ -548,16 +763,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ kfree(queue);
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
- qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
- if (qid < 0) {
+ r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
r = -ENOMEM;
+ up_read(&adev->reset_domain->sem);
goto unlock;
}
+ up_read(&adev->reset_domain->sem);
+ queue->userq_mgr = uq_mgr;
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
@@ -570,7 +796,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- idr_remove(&uq_mgr->userq_idr, qid);
+ xa_erase(&uq_mgr->userq_mgr_xa, qid);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
@@ -592,10 +818,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
kfree(queue_name);
args->out.queue_id = qid;
+ atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
return r;
}
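
Editor's note: the idr-to-xarray conversion above also changes the allocation calling convention: idr_alloc() returns the new ID (or a negative errno), while xa_alloc() returns 0 or a negative errno and writes the ID through a u32 pointer. A minimal sketch of the new pattern:

	u32 qid;
	int r;

	r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue,
		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
	if (r)
		return r; /* no ID was allocated */
	/* on success, qid holds an ID in [1, AMDGPU_MAX_USERQ_COUNT] */
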
@@ -693,11 +919,19 @@ static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+
+ if (!amdgpu_userq_buffer_vas_mapped(queue)) {
+ drm_file_err(uq_mgr->file,
+ "trying restore queue without va mapping\n");
+ queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+ continue;
+ }
+
r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
@@ -760,12 +994,21 @@ static int
amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ bool invalidated = false, new_addition = false;
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_hmm_range *range;
struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned long key, tmp_key;
struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
struct drm_exec exec;
+ struct xarray xa;
int ret;
+ xa_init(&xa);
+
+retry_lock:
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
ret = amdgpu_vm_lock_pd(vm, &exec, 1);
@@ -792,10 +1035,74 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
goto unlock_all;
}
+ if (invalidated) {
+ xa_for_each(&xa, tmp_key, range) {
+ bo = range->bo;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+ }
+ invalidated = false;
+ }
+
ret = amdgpu_vm_handle_moved(adev, vm, NULL);
if (ret)
goto unlock_all;
+ key = 0;
+ /* Validate User Ptr BOs */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+ if (!bo)
+ continue;
+
+ if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
+ continue;
+
+ range = xa_load(&xa, key);
+ if (range && range->bo != bo) {
+ xa_erase(&xa, key);
+ amdgpu_hmm_range_free(range);
+ range = NULL;
+ }
+
+ if (!range) {
+ range = amdgpu_hmm_range_alloc(bo);
+ if (!range) {
+ ret = -ENOMEM;
+ goto unlock_all;
+ }
+
+ xa_store(&xa, key, range, GFP_KERNEL);
+ new_addition = true;
+ }
+ key++;
+ }
+
+ if (new_addition) {
+ drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (ret)
+ goto unlock_all;
+ }
+
+ invalidated = true;
+ new_addition = false;
+ goto retry_lock;
+ }
+
ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
goto unlock_all;
@@ -815,6 +1122,13 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
unlock_all:
drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ amdgpu_hmm_range_free(range);
+ }
+ xa_destroy(&xa);
return ret;
}
@@ -848,11 +1162,12 @@ static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
/* Try to unmap all the queues in this process ctx */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
@@ -863,13 +1178,31 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+void amdgpu_userq_reset_work(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ userq_reset_work);
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_USERQ;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+}
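
Editor's note: this handler is scheduled through adev->userq_reset_work (see amdgpu_userq_gpu_reset() above), so the work item presumably gets wired up once during device bring-up; the exact location is outside this patch excerpt, but the initialization would follow the usual pattern:

	INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
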
+
static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id, ret;
+ unsigned long queue_id;
+ int ret;
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
struct dma_fence *f = queue->last_fence;
if (!f || dma_fence_is_signaled(f))
@@ -889,22 +1222,19 @@ void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence)
{
- int ret;
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ int ret;
/* Wait for any pending userqueue fence work to finish */
ret = amdgpu_userq_wait_for_signal(uq_mgr);
- if (ret) {
- drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
- return;
- }
+ if (ret)
+ dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
ret = amdgpu_userq_evict_all(uq_mgr);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
- return;
- }
+ if (ret)
+ dev_err(adev->dev, "Failed to evict userqueue\n");
/* Signal current eviction fence */
amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
@@ -922,44 +1252,31 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f
struct amdgpu_device *adev)
{
mutex_init(&userq_mgr->userq_mutex);
- idr_init_base(&userq_mgr->userq_idr, 1);
+ xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
userq_mgr->adev = adev;
userq_mgr->file = file_priv;
- mutex_lock(&adev->userq_mutex);
- list_add(&userq_mgr->list, &adev->userq_mgr_list);
- mutex_unlock(&adev->userq_mutex);
-
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
return 0;
}
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
- struct amdgpu_device *adev = userq_mgr->adev;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- uint32_t queue_id;
+ unsigned long queue_id;
cancel_delayed_work_sync(&userq_mgr->resume_work);
- mutex_lock(&adev->userq_mutex);
mutex_lock(&userq_mgr->userq_mutex);
- idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_detect_and_reset_queues(userq_mgr);
+ xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
amdgpu_userq_unmap_helper(userq_mgr, queue);
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
}
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- if (uqm == userq_mgr) {
- list_del(&uqm->list);
- break;
- }
- }
- idr_destroy(&userq_mgr->userq_idr);
+ xa_destroy(&userq_mgr->userq_mgr_xa);
mutex_unlock(&userq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
mutex_destroy(&userq_mgr->userq_mutex);
}
@@ -967,57 +1284,51 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (adev->in_s0ix)
- r = amdgpu_userq_preempt_helper(uqm, queue);
- else
- r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ guard(mutex)(&uqm->userq_mutex);
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return 0;
}
int amdgpu_userq_resume(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (adev->in_s0ix)
- r = amdgpu_userq_restore_helper(uqm, queue);
- else
- r = amdgpu_userq_map_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ guard(mutex)(&uqm->userq_mutex);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+
+ return 0;
}
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
@@ -1025,33 +1336,32 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already stopped!\n");
adev->userq_halt_for_enforce_isolation = true;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
- (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
- (queue->xcp_id == idx)) {
- r = amdgpu_userq_preempt_helper(uqm, queue);
- if (r)
- ret = r;
- }
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
}
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
@@ -1060,21 +1370,20 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
adev->userq_halt_for_enforce_isolation = false;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
@@ -1082,9 +1391,92 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (r)
ret = r;
}
- }
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_bo_va *bo_va = mapping->bo_va;
+ struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
+ int ret = 0;
+
+ if (!ip_mask)
+ return 0;
+
+ dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
+	/*
+	 * The userq VA mapping reservation should include the eviction fence.
+	 * If the eviction fence can't signal successfully during unmapping,
+	 * the driver warns to flag this improper unmap of the userq VA.
+ * Note: The eviction fence may be attached to different BOs, and this
+ * unmap is only for one kind of userq VAs, so at this point suppose
+ * the eviction fence is always unsignaled.
+ */
+ if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
+{
+ const struct amdgpu_userq_funcs *userq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ cancel_delayed_work_sync(&uqm->resume_work);
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ amdgpu_userq_wait_for_last_fence(uqm, queue);
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ userq_funcs->unmap(uqm, queue);
+			/* Just mark all queues as hung at this point.
+			 * If the unmap succeeds, we can map them again
+			 * in amdgpu_userq_post_reset() if VRAM is not lost.
+			 */
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_fence_driver_force_completion(queue);
+ }
+ }
+}
+
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
+{
+	/* Queues unmapped and marked as AMDGPU_USERQ_STATE_HUNG in
+	 * amdgpu_userq_pre_reset() can be mapped again and continue
+	 * if VRAM is not lost.
+	 */
+ struct amdgpu_userq_mgr *uqm;
+ struct amdgpu_usermode_queue *queue;
+ const struct amdgpu_userq_funcs *userq_funcs;
+ unsigned long queue_id;
+ int r = 0;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Re-map queue */
+ r = userq_funcs->map(uqm, queue);
+ if (r) {
+ dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
+ continue;
+ }
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index c027dd916672..c37444427a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -37,6 +37,7 @@ enum amdgpu_userq_state {
AMDGPU_USERQ_STATE_MAPPED,
AMDGPU_USERQ_STATE_PREEMPTED,
AMDGPU_USERQ_STATE_HUNG,
+ AMDGPU_USERQ_STATE_INVALID_VA,
};
struct amdgpu_mqd_prop;
@@ -47,6 +48,11 @@ struct amdgpu_userq_obj {
struct amdgpu_bo *obj;
};
+struct amdgpu_userq_va_cursor {
+ u64 gpu_addr;
+ struct list_head list;
+};
+
struct amdgpu_usermode_queue {
int queue_type;
enum amdgpu_userq_state state;
@@ -66,6 +72,8 @@ struct amdgpu_usermode_queue {
u32 xcp_id;
int priority;
struct dentry *debugfs_queue;
+
+ struct list_head userq_va_list;
};
struct amdgpu_userq_funcs {
@@ -88,12 +96,17 @@ struct amdgpu_userq_funcs {
/* Usermode queues for gfx */
struct amdgpu_userq_mgr {
- struct idr userq_idr;
+ /**
+ * @userq_mgr_xa: Per-process user queue map (queue ID → queue)
+ * Key: queue_id (unique ID within the process's userq manager)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_mgr_xa;
struct mutex userq_mutex;
struct amdgpu_device *adev;
struct delayed_work resume_work;
- struct list_head list;
struct drm_file *file;
+ atomic_t userq_count[AMDGPU_RING_TYPE_MAX];
};
struct amdgpu_db_info {
@@ -136,7 +149,13 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
-
-int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
- u64 expected_size);
+void amdgpu_userq_reset_work(struct work_struct *work);
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev);
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost);
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 761bad98da3e..eba9fb359047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -151,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
{
struct amdgpu_userq_fence *userq_fence, *tmp;
struct dma_fence *fence;
+ unsigned long flags;
u64 rptr;
int i;
if (!fence_drv)
return;
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
rptr = amdgpu_userq_fence_read(fence_drv);
- spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
@@ -174,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
list_del(&userq_fence->link);
dma_fence_put(fence);
}
- spin_unlock(&fence_drv->fence_list_lock);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}
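
Editor's note: the switch to the irqsave variant matters because this lock is also taken from interrupt context (note the xa_erase_irq() usage in the userq cleanup path above); with plain spin_lock(), an IRQ arriving on the same CPU while the lock is held would self-deadlock. A minimal sketch with hypothetical lock and handler names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);

static void process_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_lock, flags); /* masks local IRQs too */
	/* ... touch data shared with the IRQ handler ... */
	spin_unlock_irqrestore(&shared_lock, flags);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	spin_lock(&shared_lock); /* IRQs already disabled in hard-IRQ context */
	/* ... */
	spin_unlock(&shared_lock);
	return IRQ_HANDLED;
}
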
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
@@ -386,6 +387,7 @@ static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
amdgpu_bo_unreserve(queue->vm->root.bo);
r = amdgpu_bo_reserve(bo, true);
if (r) {
+ amdgpu_bo_unref(&bo);
DRM_ERROR("Failed to reserve userqueue wptr bo");
return r;
}
@@ -537,7 +539,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
}
/* Retrieve the user queue */
- queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
if (!queue) {
r = -ENOENT;
goto put_gobj_write;
@@ -899,7 +901,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
*/
num_fences = dma_fence_dedup_array(fences, num_fences);
- waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
if (!waitq) {
r = -EINVAL;
goto free_fences;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ce318f5de047..a7d8f1ce6ac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -41,6 +41,9 @@
#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_SI
+#define FIRMWARE_VCE_V1_0 "amdgpu/vce_1_0_0.bin"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
@@ -61,6 +64,9 @@
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
+#ifdef CONFIG_DRM_AMDGPU_SI
+MODULE_FIRMWARE(FIRMWARE_VCE_V1_0);
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -88,82 +94,93 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
/**
- * amdgpu_vce_sw_init - allocate memory, load vce firmware
+ * amdgpu_vce_firmware_name() - determine the firmware file name for VCE
*
* @adev: amdgpu_device pointer
- * @size: size for the new BO
*
- * First step to get VCE online, allocate memory and load the firmware
+ * Each chip that has VCE IP may need a different firmware.
+ * This function returns the name of the VCE firmware file
+ * appropriate for the current chip.
*/
-int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev)
{
- const char *fw_name;
- const struct common_firmware_header *hdr;
- unsigned int ucode_version, version_major, version_minor, binary_id;
- int i, r;
-
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_PITCAIRN:
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ return FIRMWARE_VCE_V1_0;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
- fw_name = FIRMWARE_BONAIRE;
- break;
+ return FIRMWARE_BONAIRE;
case CHIP_KAVERI:
- fw_name = FIRMWARE_KAVERI;
- break;
+ return FIRMWARE_KAVERI;
case CHIP_KABINI:
- fw_name = FIRMWARE_KABINI;
- break;
+ return FIRMWARE_KABINI;
case CHIP_HAWAII:
- fw_name = FIRMWARE_HAWAII;
- break;
+ return FIRMWARE_HAWAII;
case CHIP_MULLINS:
- fw_name = FIRMWARE_MULLINS;
- break;
+ return FIRMWARE_MULLINS;
#endif
case CHIP_TONGA:
- fw_name = FIRMWARE_TONGA;
- break;
+ return FIRMWARE_TONGA;
case CHIP_CARRIZO:
- fw_name = FIRMWARE_CARRIZO;
- break;
+ return FIRMWARE_CARRIZO;
case CHIP_FIJI:
- fw_name = FIRMWARE_FIJI;
- break;
+ return FIRMWARE_FIJI;
case CHIP_STONEY:
- fw_name = FIRMWARE_STONEY;
- break;
+ return FIRMWARE_STONEY;
case CHIP_POLARIS10:
- fw_name = FIRMWARE_POLARIS10;
- break;
+ return FIRMWARE_POLARIS10;
case CHIP_POLARIS11:
- fw_name = FIRMWARE_POLARIS11;
- break;
+ return FIRMWARE_POLARIS11;
case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
- break;
+ return FIRMWARE_POLARIS12;
case CHIP_VEGAM:
- fw_name = FIRMWARE_VEGAM;
- break;
+ return FIRMWARE_VEGAM;
case CHIP_VEGA10:
- fw_name = FIRMWARE_VEGA10;
- break;
+ return FIRMWARE_VEGA10;
case CHIP_VEGA12:
- fw_name = FIRMWARE_VEGA12;
- break;
+ return FIRMWARE_VEGA12;
case CHIP_VEGA20:
- fw_name = FIRMWARE_VEGA20;
- break;
+ return FIRMWARE_VEGA20;
default:
- return -EINVAL;
+ return NULL;
}
+}
+
+/**
+ * amdgpu_vce_early_init() - try to load VCE firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tries to load the VCE firmware.
+ *
+ * When not found, returns -ENOENT so that the driver can
+ * still load and initialize the rest of the IP blocks.
+ * The GPU can function just fine without VCE; it simply
+ * won't support video encoding.
+ */
+int amdgpu_vce_early_init(struct amdgpu_device *adev)
+{
+ const char *fw_name = amdgpu_vce_firmware_name(adev);
+ const struct common_firmware_header *hdr;
+ unsigned int ucode_version, version_major, version_minor, binary_id;
+ int r;
+
+ if (!fw_name)
+ return -ENOENT;
r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
- fw_name);
+ dev_err(adev->dev,
+ "amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n",
+ fw_name, r);
+
amdgpu_ucode_release(&adev->vce.fw);
- return r;
+ return -ENOENT;
}
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
@@ -172,11 +189,35 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
+ dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
+ return 0;
+}
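
For reference, the bit layout parsed above (12-bit major at bit 20, 12-bit minor at bit 8, 8-bit binary ID) and the repacked fw_version can be exercised with a stand-alone sketch; the sample ucode_version value here is made up:

/* Stand-alone sketch of the VCE version unpack/repack above.
 * The sample ucode_version is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ucode_version = (52u << 20) | (4u << 8) | 3u; /* 52.4, binary 3 */
	uint32_t major = (ucode_version >> 20) & 0xfff;
	uint32_t minor = (ucode_version >> 8) & 0xfff;
	uint32_t binary_id = ucode_version & 0xff;
	/* repacked form stored in adev->vce.fw_version */
	uint32_t fw_version = (major << 24) | (minor << 16) | (binary_id << 8);

	printf("VCE firmware %u.%u binary %u -> fw_version 0x%08x\n",
	       major, minor, binary_id, fw_version);
	return 0;
}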
+
+/**
+ * amdgpu_vce_sw_init() - allocate memory for VCE BO
+ *
+ * @adev: amdgpu_device pointer
+ * @size: size for the new BO
+ *
+ * First step to get VCE online: allocate memory for the VCE BO.
+ * The VCE firmware binary is copied into the VCE BO later, in
+ * amdgpu_vce_resume(). The VCE executes its code from the VCE BO
+ * and also uses the space in this BO for its stack and data.
+ *
+ * Ideally this BO should be placed in VRAM for optimal performance,
+ * although the VCE can technically also run from system RAM
+ * (albeit slowly).
+ */
+int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+{
+ int i, r;
+
+ if (!adev->vce.fw)
+ return -ENOENT;
+
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -285,40 +326,23 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
*/
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
- void *cpu_addr;
const struct common_firmware_header *hdr;
unsigned int offset;
- int r, idx;
+ int idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
- r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
- if (r) {
- dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
- dev_err(adev->dev, "(%d) VCE map failed\n", r);
- return r;
- }
-
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo));
+ memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset,
adev->vce.fw->size - offset);
drm_dev_exit(idx);
}
- amdgpu_bo_kunmap(adev->vce.vcpu_bo);
-
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
-
return 0;
}
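
The drm_dev_enter()/drm_dev_exit() pair above is the standard guard against the device disappearing while MMIO is being accessed; a minimal sketch of the pattern, with a hypothetical helper name:

/* Minimal sketch of the drm_dev_enter()/drm_dev_exit() hotplug guard
 * used above; my_copy_fw() and its arguments are hypothetical.
 */
static void my_copy_fw(struct amdgpu_device *adev, void __iomem *dst,
		       const void *src, size_t len)
{
	int idx;

	/* Skip the hardware access entirely if the device is gone */
	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	memset_io(dst, 0, len);	/* clear stale contents first */
	memcpy_toio(dst, src, len);

	drm_dev_exit(idx);
}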
@@ -427,6 +451,24 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
/**
+ * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the number of GART pages that must be reserved ahead of
+ * GTT for the VCE IP block.
+ * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details.
+ * For VCE2+, no such reservation is needed, so return zero.
+ */
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev)
+{
+ /* VCE IP block not added yet, so can't use amdgpu_ip_version */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return 512;
+
+ return 0;
+}
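
A quick check of the arithmetic: 512 GART pages at the default 4096-byte AMDGPU_GPU_PAGE_SIZE come to a 2 MiB reservation ahead of GTT. An illustrative compile-time assertion (not part of the patch):

/* Illustrative: 512 pages x 4096 bytes = 2 MiB for the VCE1 VCPU BO */
static_assert(512 * AMDGPU_GPU_PAGE_SIZE == 2 * 1024 * 1024,
	      "VCE1 GART reservation is expected to be 2 MiB");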
+
+/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
* @ring: ring we should submit the msg to
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 6e53f872d084..1c3464ce5037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -51,14 +51,17 @@ struct amdgpu_vce {
struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
+ uint32_t keyselect;
};
+int amdgpu_vce_early_init(struct amdgpu_device *adev);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
struct amdgpu_ib *ib);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dc8a17bcc3c8..82624b44e661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -100,7 +100,8 @@
#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -161,7 +162,8 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3328ab63376b..47a6ce4fdc74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -44,6 +44,18 @@
vf2pf_info->ucode_info[ucode].version = ver; \
} while (0)
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+
+const char *amdgpu_virt_dynamic_crit_table_name[] = {
+ "IP DISCOVERY",
+ "VBIOS IMG",
+ "RAS TELEMETRY",
+ "DATA EXCHANGE",
+ "BAD PAGE INFO",
+ "INIT HEADER",
+ "LAST",
+};
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
virt->ops->req_init_data(adev);
if (adev->virt.req_init_data_ver > 0)
- DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n",
+ adev->virt.req_init_data_ver);
else
- DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+ dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
}
/**
@@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
- DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
+ dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
- DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
+ dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
@@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
&bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_dbg(adev->dev,
+ "RAS WARN: reserve vram for retired page %llx fail\n",
+ bp);
data->bps_bo[i] = bo;
}
data->last_reserved = i + 1;
@@ -598,8 +613,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage =
- ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+ vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
vf2pf_info->fb_vis_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
@@ -658,10 +673,34 @@ out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
+static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
+{
+ uint32_t dataexchange_offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
+ uint32_t dataexchange_size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
+ uint64_t pos = 0;
+
+	dev_info(adev->dev,
+		 "Got data exchange info from dynamic crit_region_table at offset 0x%x, size 0x%x bytes.\n",
+		 dataexchange_offset, dataexchange_size);
+
+ if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
+		dev_err(adev->dev, "Data exchange offset or size not aligned to 4 bytes\n");
+ return -EINVAL;
+ }
+
+ pos = (uint64_t)dataexchange_offset;
+ amdgpu_device_vram_access(adev, pos, pfvf_data,
+ dataexchange_size, false);
+
+ return 0;
+}
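
The alignment checks exist because the VRAM access helper transfers 32-bit words; a user-space sketch of the IS_ALIGNED() test (this simplified macro body matches the kernel's behavior for power-of-two alignments):

/* User-space sketch of the 4-byte alignment rule enforced above. */
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint32_t offset = 0x1004;	/* multiple of 4: accepted */
	uint32_t size = 0x2002;		/* not a multiple of 4: rejected */

	printf("offset ok: %d, size ok: %d\n",
	       IS_ALIGNED(offset, 4), IS_ALIGNED(size, 4));
	return 0;
}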
+
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
- DRM_INFO("clean up the vf2pf work item\n");
+ dev_info(adev->dev, "clean up the vf2pf work item\n");
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
adev->virt.vf2pf_update_interval_ms = 0;
}
@@ -669,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
+ uint32_t *pfvf_data = NULL;
+
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
- DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
@@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
} else if (adev->bios != NULL) {
/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ pfvf_data =
+ kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
+ GFP_KERNEL);
+ if (!pfvf_data) {
+ dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
+ return;
+ }
- amdgpu_virt_read_pf2vf_data(adev);
+ if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
+ goto free_pfvf_data;
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+free_pfvf_data:
+ kfree(pfvf_data);
+ pfvf_data = NULL;
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
}
}
@@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
if (adev->mman.fw_vram_usage_va) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
- adev->virt.fw_reserve.p_vf2pf =
- (struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
- adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
+ (AMD_SRIOV_MSG_SIZE_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+ }
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
break;
default: /* other chip doesn't support SRIOV */
is_sriov = false;
- DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+ dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
@@ -838,10 +917,220 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
RATELIMIT_MSG_ON_RELEASE);
mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+ mutex_init(&adev->virt.access_req_mutex);
adev->virt.ras.cper_rptr = 0;
}
+static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
+{
+ uint32_t sum = 0;
+
+ if (buf_start >= buf_end)
+ return 0;
+
+ for (; buf_start < buf_end; buf_start++)
+ sum += buf_start[0];
+
+ return 0xffffffff - sum;
+}
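
Because the accumulator is 32-bit but the return type is uint8_t, only the low byte of 0xffffffff - sum survives, which equals (0xff - (sum & 0xff)) mod 256; the producer of the header must compute the same byte for the comparison below to pass. A user-space sketch of the round-trip, with made-up buffer contents:

/* User-space sketch of the checksum round-trip. Only the low byte of
 * (0xffffffff - sum) survives the uint8_t return, so sum + checksum
 * is always congruent to 0xff modulo 256.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t calc_checksum(const uint8_t *start, const uint8_t *end)
{
	uint32_t sum = 0;

	for (; start < end; start++)
		sum += *start;

	return 0xffffffff - sum;	/* truncated to the low byte */
}

int main(void)
{
	uint8_t buf[] = { 0x10, 0x20, 0x30 };	/* sum = 0x60 */
	uint8_t csum = calc_checksum(buf, buf + sizeof(buf));

	/* 0xffffffff - 0x60 = 0xffffff9f -> low byte 0x9f */
	printf("checksum 0x%02x, (sum + csum) & 0xff = 0x%02x\n",
	       csum, (uint8_t)(0x60 + csum));
	return 0;
}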
+
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
+ u64 init_hdr_offset = adev->virt.init_data_header.offset;
+ u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
+ u64 vram_size;
+ u64 end;
+ int r = 0;
+ uint8_t checksum = 0;
+
+	/* Skip the init below if the critical region version is not v2 */
+ if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
+ return 0;
+
+	/* init_hdr_offset is unsigned, so validate the size instead */
+	if (init_hdr_size < sizeof(struct amd_sriov_msg_init_data_header)) {
+		dev_err(adev->dev, "Invalid init header size\n");
+		return -EINVAL;
+	}
+
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ return -EINVAL;
+ vram_size <<= 20;
+
+ if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
+ dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
+ return -EINVAL;
+ }
+
+ /* Allocate for init_data_hdr */
+ init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
+ if (!init_data_hdr)
+ return -ENOMEM;
+
+ amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
+ sizeof(struct amd_sriov_msg_init_data_header), false);
+
+ /* Table validation */
+ if (strncmp(init_data_hdr->signature,
+ AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
+ AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
+ dev_err(adev->dev, "Invalid init data signature: %.4s\n",
+ init_data_hdr->signature);
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum = amdgpu_virt_crit_region_calc_checksum(
+ (uint8_t *)&init_data_hdr->initdata_offset,
+ (uint8_t *)init_data_hdr +
+ sizeof(struct amd_sriov_msg_init_data_header));
+ if (checksum != init_data_hdr->checksum) {
+		dev_err(adev->dev, "Checksum mismatch: calculated 0x%x, init_data has 0x%x\n",
+			checksum, init_data_hdr->checksum);
+ r = -EINVAL;
+ goto out;
+ }
+
+ memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
+ memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
+
+ adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
+ adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
+
+ /* Validation and initialization for each table entry */
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
+ if (!init_data_hdr->ip_discovery_size_in_kb ||
+ init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
+ init_data_hdr->ip_discovery_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
+ init_data_hdr->ip_discovery_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
+ init_data_hdr->ip_discovery_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
+ if (!init_data_hdr->vbios_img_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
+ init_data_hdr->vbios_img_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
+ init_data_hdr->vbios_img_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
+ init_data_hdr->vbios_img_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
+ if (!init_data_hdr->ras_tele_info_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
+ init_data_hdr->ras_tele_info_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
+ init_data_hdr->ras_tele_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
+ init_data_hdr->ras_tele_info_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
+ if (!init_data_hdr->dataexchange_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
+ init_data_hdr->dataexchange_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
+ init_data_hdr->dataexchange_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
+ init_data_hdr->dataexchange_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
+ if (!init_data_hdr->bad_page_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
+ init_data_hdr->bad_page_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
+ init_data_hdr->bad_page_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
+ init_data_hdr->bad_page_size_in_kb;
+ }
+
+ /* Validation for critical region info */
+ if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+	/* reserved memory starts at the crit region base offset with a size of 5 MB */
+	adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
+	adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
+	dev_info(adev->dev,
+		 "critical region v%d: reserving memory starting at 0x%08llx, size %llu KB.\n",
+		 init_data_hdr->version,
+		 adev->mman.fw_vram_usage_start_offset,
+		 adev->mman.fw_vram_usage_size >> 10);
+
+ adev->virt.is_dynamic_crit_regn_enabled = true;
+
+out:
+ kfree(init_data_hdr);
+ init_data_hdr = NULL;
+
+ return r;
+}
+
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size)
+{
+ uint32_t data_offset = 0;
+ uint32_t data_size = 0;
+ enum amd_sriov_msg_table_id_enum data_table_id = data_id;
+
+ if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
+ return -EINVAL;
+
+ data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
+ data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
+
+	/* Validate input params */
+	if (!binary || !size || *size < data_size)
+		return -EINVAL;
+
+	/* Proceed to copy the dynamic content */
+	amdgpu_device_vram_access(adev,
+		(uint64_t)data_offset, (uint32_t *)binary, data_size, false);
+	*size = data_size;
+
+	dev_dbg(adev->dev,
+		"Got %s info from dynamic crit_region_table at offset 0x%x, size 0x%x bytes.\n",
+		amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
+
+ return 0;
+}
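
A hedged usage sketch of the helper above: a hypothetical caller (my_read_vbios_img() is not part of the driver) sizing its buffer from the same crit_regn_tbl bookkeeping and fetching the VBIOS image table:

/* Hypothetical caller: copy the VBIOS image table out of VRAM. */
static int my_read_vbios_img(struct amdgpu_device *adev)
{
	u32 size = adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb << 10;
	uint8_t *buf;
	int r;

	if (!size)
		return -ENOENT;	/* table not present in the header */

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = amdgpu_virt_get_dynamic_data_info(adev, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
					      buf, &size);
	if (!r)
		dev_dbg(adev->dev, "read %u bytes of VBIOS image\n", size);

	kfree(buf);
	return r;
}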
+
void amdgpu_virt_init(struct amdgpu_device *adev)
{
bool is_sriov = false;
@@ -1289,7 +1578,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
@@ -1304,7 +1593,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
@@ -1383,7 +1672,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
@@ -1515,7 +1804,7 @@ static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index d1172c8e58c4..01d5bca2dee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -54,6 +54,12 @@
#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
+/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */
+#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA"
+#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4
+
+#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id)))
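
valid_tables is a bitmask indexed by the amd_sriov_msg_table_id_enum values; a worked fragment with made-up contents (hdr and present are placeholders):

/* Worked example with made-up contents: advertise the IP discovery,
 * VBIOS image and data exchange tables -> bits 0, 1 and 3, i.e. 0x0b.
 */
hdr->valid_tables = BIT(AMD_SRIOV_MSG_IPD_TABLE_ID) |		/* bit 0 */
		    BIT(AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID) |	/* bit 1 */
		    BIT(AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID);	/* bit 3 */

/* true for the three tables above, false for e.g. RAS telemetry */
present = IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID);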
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -144,6 +150,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
AMDGIM_FEATURE_RAS_CPER = (1 << 11),
+ AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -262,6 +269,11 @@ struct amdgpu_virt_ras {
DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+struct amdgpu_virt_region {
+ uint32_t offset;
+ uint32_t size_kb;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -289,6 +301,12 @@ struct amdgpu_virt {
bool ras_init_done;
uint32_t reg_access;
+ /* dynamic(v2) critical regions */
+ struct amdgpu_virt_region init_data_header;
+ struct amdgpu_virt_region crit_regn;
+ struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID];
+ bool is_dynamic_crit_regn_enabled;
+
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
@@ -307,6 +325,8 @@ struct amdgpu_virt {
/* Spinlock to protect access to the RLCG register interface */
spinlock_t rlcg_reg_lock;
+ struct mutex access_req_mutex;
+
union amd_sriov_ras_caps ras_en_caps;
union amd_sriov_ras_caps ras_telemetry_en_caps;
struct amdgpu_virt_ras ras;
@@ -378,6 +398,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_ras_cper_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -424,6 +447,10 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_init(struct amdgpu_device *adev);
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size);
+
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c1a801203949..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -484,15 +484,19 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
spin_lock(&vm->status_lock);
while (!list_is_head(prev->next, &vm->done)) {
bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
- spin_unlock(&vm->status_lock);
bo = bo_va->base.bo;
if (bo) {
+ amdgpu_bo_ref(bo);
+ spin_unlock(&vm->status_lock);
+
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ amdgpu_bo_unref(&bo);
if (unlikely(ret))
return ret;
+
+ spin_lock(&vm->status_lock);
}
- spin_lock(&vm->status_lock);
prev = prev->next;
}
spin_unlock(&vm->status_lock);
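
The rework above pins the BO with a reference before dropping vm->status_lock, so the object cannot be freed while drm_exec_prepare_obj() sleeps. A generic sketch of the take-ref-then-unlock pattern, with placeholder names throughout:

/* Generic sketch of the take-ref-then-unlock pattern (all names are
 * placeholders). The reference keeps the object alive while the lock
 * is dropped around a sleeping call.
 */
static int process_one_object(struct my_ctx *ctx, struct my_obj *obj)
{
	int ret;

	/* caller holds ctx->lock; obj was found under that lock */
	kref_get(&obj->ref);		/* keep obj alive */
	spin_unlock(&ctx->lock);

	ret = do_sleeping_work(obj);	/* may sleep; lock not held */
	kref_put(&obj->ref, my_obj_release);

	spin_lock(&ctx->lock);
	/* re-validate any iteration state before continuing */
	return ret;
}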
@@ -779,7 +783,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
- struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -842,12 +845,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
}
if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
- r = amdgpu_fence_emit(ring, &fence, NULL, 0);
+ r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
if (r)
return r;
- /* this is part of the job's context */
- af = container_of(fence, struct amdgpu_fence, base);
- af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ fence = &job->hw_vm_fence->base;
+ /* get a ref for the job */
+ dma_fence_get(fence);
}
if (vm_flush_needed) {
@@ -1066,7 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked && vm->is_compute_context) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
@@ -1952,6 +1955,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
+ int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1972,6 +1976,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
return -ENOENT;
}
+	/* It is unlikely that the userq mapping has not been idled by the
+	 * time user space issues the GEM unmap IOCTL, unless user space is
+	 * forcing the unmap.
+	 */
+ if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
+ r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
+ if (unlikely(r == -EBUSY))
+ dev_warn_once(adev->dev,
+ "Attempt to unmap an active userq buffer\n");
+ }
+
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
mapping->bo_va = NULL;
@@ -2078,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = before->bo_va->base.bo;
amdgpu_vm_it_insert(before, &vm->va);
- if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (before->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2093,7 +2108,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = after->bo_va->base.bo;
amdgpu_vm_it_insert(after, &vm->va);
- if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (after->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2828,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i;
-
/* Concurrent flushes are only possible starting with Vega10 and
* are broken on Navi10 and Navi14.
*/
@@ -2838,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
-
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2908,8 +2916,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
- break;
+ return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
case AMDGPU_VM_OP_UNRESERVE_VMID:
amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index cf0ec94e8a07..15d757c016cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -453,10 +453,6 @@ struct amdgpu_vm_manager {
unsigned int first_kfd_vmid;
bool concurrent_flush;
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
uint64_t max_pfn;
uint32_t num_level;
uint32_t block_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 474bfe36c0c2..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
+
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a5adb2ed9b3c..9d934c07fa6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
!adev->gmc.vram_vendor)
return 0;
+ if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
+ return 0;
+
return attr->mode;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1ede308a7c67..aad530c46a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -298,6 +298,9 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
{
int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
@@ -333,6 +336,10 @@ static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link
}
i = global_link_num / n;
+
+ if (!(adev->aid_mask & BIT(i)))
+ return U32_MAX;
+
addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
@@ -342,6 +349,9 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
{
u32 xgmi_state_reg_val;
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
@@ -958,28 +968,6 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf
return 0;
}
-static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
-{
- struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
- struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
-
- for (int i = 0; i < peer_info->num_nodes; i++) {
- if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
- for (int j = 0; j < top_info->num_nodes; j++) {
- if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
- peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
- peer_info->nodes[i].is_sharing_enabled =
- top_info->nodes[j].is_sharing_enabled;
- peer_info->nodes[i].num_links =
- top_info->nodes[j].num_links;
- return;
- }
- }
- }
- }
-}
-
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -1065,11 +1053,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
/* To do: continue with some node failed or disable the whole hive*/
goto exit_unlock;
}
-
- /* fill the topology info for peers instead of getting from PSP */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
- }
} else {
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 3a79ed7d8031..3cdb1e0eca37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -23,26 +23,84 @@
#ifndef AMDGV_SRIOV_MSG__H_
#define AMDGV_SRIOV_MSG__H_
-/* unit in kilobytes */
-#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
-#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
-#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
-#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
-#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
-#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
-#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
+#define AMD_SRIOV_MSG_SIZE_KB 1
+
/*
- * layout
+ * layout v1
* 0 64KB 65KB 66KB 68KB 132KB
* | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
* | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/
-#define AMD_SRIOV_MSG_SIZE_KB 1
-#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
-#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
+/*
+ * layout v2 (offsets are dynamically allocated and the offsets below are examples)
+ * 0 1KB 64KB 65KB 66KB 68KB 132KB
+ * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
+ * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ...
+ *
+ * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously)
+ */
+
+/* v1 layout sizes */
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+
+/* v1 offsets */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1)
+
+enum amd_sriov_crit_region_version {
+ GPU_CRIT_REGION_V1 = 1,
+ GPU_CRIT_REGION_V2 = 2,
+};
+
+/* v2 layout offset enum (in order of allocation) */
+enum amd_sriov_msg_table_id_enum {
+ AMD_SRIOV_MSG_IPD_TABLE_ID = 0,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
+ AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID,
+ AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID,
+ AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID,
+ AMD_SRIOV_MSG_INITD_H_TABLE_ID,
+ AMD_SRIOV_MSG_MAX_TABLE_ID,
+};
+
+struct amd_sriov_msg_init_data_header {
+ char signature[4]; /* "INDA" */
+ uint32_t version;
+ uint32_t checksum;
+ uint32_t initdata_offset; /* 0 */
+ uint32_t initdata_size_in_kb; /* 5MB */
+ uint32_t valid_tables;
+ uint32_t vbios_img_offset;
+ uint32_t vbios_img_size_in_kb;
+ uint32_t dataexchange_offset;
+ uint32_t dataexchange_size_in_kb;
+ uint32_t ras_tele_info_offset;
+ uint32_t ras_tele_info_size_in_kb;
+ uint32_t ip_discovery_offset;
+ uint32_t ip_discovery_size_in_kb;
+ uint32_t bad_page_info_offset;
+ uint32_t bad_page_size_in_kb;
+ uint32_t reserved[8];
+};
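
The struct works out to 96 bytes (4-byte signature, fifteen u32 fields, eight reserved u32s, no padding since every member is 4-byte aligned), so it fits easily in the 1 KB INITD_H slot from the v2 layout sketch. Illustrative compile-time checks, not part of the patch:

/* Illustrative: the header is 96 bytes and fits the 1 KB INITD_H slot */
static_assert(sizeof(struct amd_sriov_msg_init_data_header) == 96,
	      "init data header layout changed");
static_assert(sizeof(struct amd_sriov_msg_init_data_header) <= 1024,
	      "init data header must fit in the INITD_H slot");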
/*
* PF2VF history log:
@@ -102,7 +160,8 @@ union amd_sriov_msg_feature_flags {
uint32_t ras_caps : 1;
uint32_t ras_telemetry : 1;
uint32_t ras_cper : 1;
- uint32_t reserved : 20;
+ uint32_t xgmi_ta_ext_peer_link : 1;
+ uint32_t reserved : 19;
} flags;
uint32_t all;
};
@@ -140,8 +199,9 @@ union amd_sriov_ras_caps {
uint64_t block_jpeg : 1;
uint64_t block_ih : 1;
uint64_t block_mpio : 1;
+ uint64_t block_mmsch : 1;
uint64_t poison_propogation_mode : 1;
- uint64_t reserved : 44;
+ uint64_t reserved : 43;
} bits;
uint64_t all;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 811124ff88a8..f9e2edf5260b 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -407,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return -EINVAL;
}
- if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+ if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+ !adev->in_suspend)
flags |= AMDGPU_XCP_OPS_KFD;
if (flags & AMDGPU_XCP_OPS_KFD) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 41f4705bdbbd..876a3256dba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
/* When a ring buffer overflow happen start parsing interrupt
@@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
index 96616a865aac..ed1e25661706 100644
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 2f891fb846d5..bc7a2e06ab5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 8841d7213de4..d75b9940f248 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4956,7 +4956,8 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
@@ -9951,6 +9952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 66c47c466532..8a2ee2de390f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1821,13 +1821,15 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
(adev->gfx.mec_fw_version >= 2410) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
@@ -2438,7 +2440,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
if (version_minor == 3)
gfx_v11_0_load_rlcp_rlcv_microcode(adev);
}
-
+
return 0;
}
@@ -3886,7 +3888,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
memcpy(fw, fw_data, fw_size);
-
+
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
@@ -5862,8 +5864,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -5874,9 +5874,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (vmid)
+ if (vmid && !ring->adev->gfx.rs64_enable)
gfx_v11_0_ring_emit_de_meta(ring,
- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
}
amdgpu_ring_write(ring, header);
@@ -7320,6 +7320,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 710ec9c34e43..d01d2712cf57 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1548,7 +1548,8 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 1):
if ((adev->gfx.me_fw_version >= 2660) &&
(adev->gfx.mec_fw_version >= 2920) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
@@ -4419,8 +4420,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -5597,6 +5596,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
};
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 7693b7953426..80565392313f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3102,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 5976ed55d9db..2b7aba22ecc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4399,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
gfx_v7_0_gpu_early_init(adev);
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 0856ff65288c..1c87375e1dd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2023,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return 0;
}
@@ -6939,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dd19a97436db..0148d7ff34d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2409,7 +2409,7 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
@@ -7586,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 77f9d5b9a556..cbb74ffc4792 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1149,14 +1149,16 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
if ((adev->gfx.mec_fw_version >= 155) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
case IP_VERSION(9, 5, 0):
if ((adev->gfx.mec_fw_version >= 21) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -2152,7 +2154,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
+static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
+ bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -2186,8 +2189,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
}
-
- return 0;
}
static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
@@ -2220,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring;
- int i, r;
+ int i;
gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
@@ -2228,9 +2229,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
ring = &adev->gfx.compute_ring[i + xcc_id *
adev->gfx.num_compute_rings];
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
- if (r)
- return r;
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
}
return amdgpu_gfx_enable_kcq(adev, xcc_id);
@@ -2292,7 +2291,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
} else {
- if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ if (adev->in_suspend)
+ amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+ else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE) ==
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
r = amdgpu_xcp_switch_partition_mode(
@@ -3605,11 +3606,8 @@ pipe_reset:
return r;
}
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
- if (r) {
- dev_err(adev->dev, "fail to init kcq\n");
- return r;
- }
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
if (r) {
@@ -4798,6 +4796,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
};
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index d7499be8c4bf..ce6e04242c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
struct amdgpu_task_info *task_info;
uint32_t status = 0;
u64 addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 7bc389d9f5c4..ba59ee8e398a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index f4a19357ccbc..7a9d6894e321 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *hub;
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
@@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
else
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -312,9 +341,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
}
- mutex_lock(&adev->mman.gtt_window_lock);
gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
- mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index f6ad7911f1e6..a8ec95f42926 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -213,7 +213,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+ amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -610,23 +610,21 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+ u32 status, u32 addr)
{
u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
- char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
- (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
- "write" : "read", block, mc_client, mc_id);
+ "write" : "read", mc_id);
}
static const u32 mc_cg_registers[] = {
@@ -1072,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
@@ -1079,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (!addr && !status)
return 0;
+ amdgpu_vm_update_fault_cache(adev, entry->pasid,
+ ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT,
+ status, AMDGPU_GFXHUB(0));
+
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
@@ -1089,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
- gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ gmc_v6_0_vm_decode_fault(adev, status, addr);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 93d7ccb7d013..fbd0bf147f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1261,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status, mc_client, vmid;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1290,7 +1296,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1306,8 +1312,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
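Editor's note: the mb() + atomic_set() pair above becomes a single store-release, paired with the atomic_read_acquire() on the consumer side, so the vm_fault_info payload is guaranteed visible before the flag. A self-contained C11 analogue of the same publish/consume pattern, using stdatomic rather than the kernel atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	struct fault_info { unsigned int vmid, pasid; };

	static struct fault_info vm_fault_info;
	static atomic_int vm_fault_info_updated;

	/* Writer: fill in the payload first, then publish with release
	 * semantics so the payload stores cannot pass the flag store. */
	static void publish_fault(unsigned int vmid, unsigned int pasid)
	{
		vm_fault_info.vmid = vmid;
		vm_fault_info.pasid = pasid;
		atomic_store_explicit(&vm_fault_info_updated, 1,
				      memory_order_release);
	}

	/* Reader: an acquire load of the flag makes the payload visible. */
	static void consume_fault(void)
	{
		if (atomic_load_explicit(&vm_fault_info_updated,
					 memory_order_acquire))
			printf("vmid=%u pasid=%u\n",
			       vm_fault_info.vmid, vm_fault_info.pasid);
	}

	int main(void)
	{
		publish_fault(8, 0x1234);
		consume_fault();
		return 0;
	}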
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c5e2a2c41e06..6551b60f2584 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1439,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1478,7 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1494,8 +1500,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 0d1dd587db5f..8ad7519f7b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -544,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0, cid = 0, rw = 0, fed = 0;
struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
@@ -1843,6 +1845,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
adev->gmc.vram_vendor = vram_info & 0xF;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 1317ede131b6..01cadf898c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index baf097d2e1ac..ab0bf880d3d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -878,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index 2db9b2c63693..64cae89357b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -205,13 +205,14 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
struct mes_detect_and_reset_queue_input input;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
unsigned int hung_db_num = 0;
- int queue_id, r, i;
- u32 db_array[4];
+ unsigned long queue_id;
+ u32 db_array[8];
+ bool found_hung_queue = false;
+ int r, i;
- if (db_array_size > 4) {
- dev_err(adev->dev, "DB array size (%d vs 4) too small\n",
+ if (db_array_size > 8) {
+		dev_err(adev->dev, "Hung queue DB array size (%d) exceeds local buffer (8)\n",
db_array_size);
return -EINVAL;
}
@@ -227,22 +228,26 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
if (r) {
dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
} else if (hung_db_num) {
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (queue->queue_type == queue_type) {
- for (i = 0; i < hung_db_num; i++) {
- if (queue->doorbell_index == db_array[i]) {
- queue->state = AMDGPU_USERQ_STATE_HUNG;
- atomic_inc(&adev->gpu_reset_counter);
- amdgpu_userq_fence_driver_force_completion(queue);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
- }
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
}
}
}
}
}
+ if (found_hung_queue) {
+ /* Resume scheduling after hang recovery */
+ r = amdgpu_mes_resume(adev);
+ }
+
return r;
}
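Editor's note: the local buffer grows from 4 to 8 dwords to match the MES11/MES12 defines later in this patch, where entries [0:3] carry hung doorbell offsets and [4:7] the corresponding HQD info. A minimal sketch of walking such a buffer; pairing entry i with entry 4+i is an assumption based on those defines:

	#include <stdint.h>
	#include <stdio.h>

	#define HUNG_DB_OFFSET_ARRAY_SIZE	8
	#define HUNG_HQD_INFO_OFFSET		4

	static void dump_hung_queues(const uint32_t *db_array,
				     unsigned int hung_db_num)
	{
		unsigned int i;

		for (i = 0; i < hung_db_num && i < HUNG_HQD_INFO_OFFSET; i++)
			printf("doorbell 0x%x, hqd info 0x%x\n",
			       db_array[i],
			       db_array[HUNG_HQD_INFO_OFFSET + i]);
	}

	int main(void)
	{
		uint32_t db_array[HUNG_DB_OFFSET_ARRAY_SIZE] = {
			0x40, 0, 0, 0,	/* [0:3] doorbell offsets */
			0xab, 0, 0, 0,	/* [4:7] HQD info */
		};

		dump_hung_queues(db_array, 1);
		return 0;
	}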
@@ -254,7 +259,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
struct amdgpu_mqd_prop *userq_props;
- struct amdgpu_gfx_shadow_info shadow_info;
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -280,8 +284,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
- if (adev->gfx.funcs->get_gfx_shadow_info)
- adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
@@ -298,8 +300,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
- if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
- max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+ r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+ 2048);
+ if (r)
goto free_mqd;
userq_props->eop_gpu_addr = compute_mqd->eop_va;
@@ -311,6 +314,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
kfree(compute_mqd);
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+ } else {
+ r = -EINVAL;
+ goto free_mqd;
+ }
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
@@ -330,8 +341,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
- if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
- shadow_info.shadow_size))
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+ shadow_info.shadow_size);
+ if (r)
+ goto free_mqd;
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
goto free_mqd;
kfree(mqd_gfx_v11);
@@ -350,9 +366,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
r = -ENOMEM;
goto free_mqd;
}
-
- if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
- shadow_info.csa_size))
+ r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+ 32);
+ if (r)
goto free_mqd;
userq_props->csa_addr = mqd_sdma_v11->csa_va;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index e82188431f79..3a52754b5cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -66,7 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define GFX_MES_DRAM_SIZE 0x80000
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
-#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4
+#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES11_HUNG_HQD_INFO_OFFSET 4
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
@@ -368,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -378,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x60)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
@@ -1720,8 +1725,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
- adev->mes.hung_queue_db_array_size =
- MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index aff06f06aeee..744e95d3984a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -47,7 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
-#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4
+#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES12_HUNG_HQD_INFO_OFFSET 4
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
@@ -228,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
pipe, x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
- if (r < 1 || !*status_ptr) {
+
+ /*
+	 * status_ptr[31:0] == 0 (fail) or status_ptr[31:0] == 1 (success).
+ * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information.
+ */
+ if (r < 1 || !(lower_32_bits(*status_ptr))) {
if (misc_op_str)
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
@@ -355,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -365,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x5a)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
@@ -1899,8 +1909,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
- adev->mes.hung_queue_db_array_size =
- MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
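Editor's note: per the comment added to mes_v12_0_submit_pkt_and_poll_completion() above, the 64-bit status word splits into a 32-bit result in the low half and a 32-bit debug dword in the high half. A small, self-contained illustration of that decode (lo32()/hi32() stand in for the kernel's lower_32_bits()/upper_32_bits()):

	#include <stdint.h>
	#include <stdio.h>

	static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
	static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

	static void check_mes_status(uint64_t status)
	{
		if (!lo32(status))	/* low dword 0 means failure */
			printf("failed, debug info 0x%08x\n", hi32(status));
		else
			printf("succeeded\n");
	}

	int main(void)
	{
		check_mes_status(0x1);				/* success */
		check_mes_status(0xdeadbeef00000000ULL);	/* failure */
		return 0;
	}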
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e5282a5d05d9..e7cd07383d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -173,13 +173,17 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- int r, retry = 1;
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = 0, retry = 1;
enum idh_event event = -1;
+ mutex_lock(&virt->access_req_mutex);
send_request:
- if (amdgpu_ras_is_rma(adev))
- return -ENODEV;
+ if (amdgpu_ras_is_rma(adev)) {
+ r = -ENODEV;
+ goto out;
+ }
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
@@ -217,17 +221,25 @@ send_request:
if (req != IDH_REQ_GPU_INIT_DATA) {
dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
- return r;
+ goto out;
} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
} else {
if (req == IDH_REQ_GPU_INIT_DATA) {
- adev->virt.req_init_data_ver =
- RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
-
- /* assume V1 in case host doesn't set version number */
- if (adev->virt.req_init_data_ver < 1)
- adev->virt.req_init_data_ver = 1;
+ switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
+ case GPU_CRIT_REGION_V2:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
+ adev->virt.init_data_header.offset =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
+ adev->virt.init_data_header.size_kb =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
+ break;
+ default:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
+ adev->virt.init_data_header.offset = -1;
+ adev->virt.init_data_header.size_kb = 0;
+ break;
+ }
}
}
@@ -238,7 +250,10 @@ send_request:
}
}
- return 0;
+out:
+ mutex_unlock(&virt->access_req_mutex);
+
+ return r;
}
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
@@ -285,7 +300,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+ return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
+ 0, GPU_CRIT_REGION_V2, 0);
}
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
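Editor's note: in the reworked handshake, mailbox DW1 carries the init-data version; a V2 host additionally publishes the critical-region offset in DW2 and its size in KiB in DW3. A hedged sketch of the decode; the GPU_CRIT_REGION_* values and field names here are illustrative, not taken from this patch:

	#include <stdint.h>

	enum {
		GPU_CRIT_REGION_V1 = 1,	/* hypothetical values */
		GPU_CRIT_REGION_V2 = 2,
	};

	struct init_data_header {
		int64_t offset;
		uint32_t size_kb;
	};

	static uint32_t decode_init_data(uint32_t dw1, uint32_t dw2,
					 uint32_t dw3,
					 struct init_data_header *hdr)
	{
		if (dw1 == GPU_CRIT_REGION_V2) {
			hdr->offset = dw2;	/* critical-region offset */
			hdr->size_kb = dw3;	/* critical-region size, KiB */
			return GPU_CRIT_REGION_V2;
		}

		/* V1 hosts (or no version at all) publish no region. */
		hdr->offset = -1;
		hdr->size_kb = 0;
		return GPU_CRIT_REGION_V1;
	}

	int main(void)
	{
		struct init_data_header hdr;

		return decode_init_data(GPU_CRIT_REGION_V2, 0x100, 64,
					&hdr) == GPU_CRIT_REGION_V2 ? 0 : 1;
	}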
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 1c22bc11c1f8..bdfd2917e3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp;
-
- tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
- /* If it is VF or subrevision holds a non-zero value, that should be used */
- if (tmp || amdgpu_sriov_vf(adev))
- return tmp;
+ u32 rev_id;
- /* If discovery subrev is not updated, use register version */
- tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
- tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
- STRAP_ATI_REV_ID_DEV0_F0);
+	/*
+	 * For VFs, fetch the sub-revision field from the IP discovery
+	 * table (zero if the entry is not populated); otherwise read
+	 * the revision from the strap register below.
+	 */
+ if (amdgpu_sriov_vf(adev)) {
+ rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
+ } else {
+ rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
+ STRAP_ATI_REV_ID_DEV0_F0);
+ }
- return tmp;
+ return rev_id;
}
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 64b240b51f1a..a9be7a505026 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -142,13 +142,37 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
return err;
}
-static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg1, sol_reg2;
+ int retry_loop;
+ /* Wait for the TOS to be unloaded */
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ usleep_range(1000, 2000);
+ sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg1 == sol_reg2)
+ return 0;
+ }
+	dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x\n",
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+
+ return -ETIME;
+}
+
+static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
+ /* For a reset done at the end of S3, only wait for TOS to be unloaded */
+ if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+ return psp_v11_wait_for_tos_unload(psp);
+
for (retry_loop = 0; retry_loop < 20; retry_loop++) {
		/* Wait for the bootloader to signal that it is
		   ready by setting bit 31 of C2PMSG_35 to 1 */
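Editor's note: psp_v11_wait_for_tos_unload() above uses a "value stopped changing" poll: read C2PMSG_81 twice, roughly 1-2 ms apart, and treat two identical reads as the TOS having finished unloading. The generic shape of that pattern, as a self-contained userspace sketch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <unistd.h>

	/* Succeeds once two consecutive reads return the same value. */
	static bool poll_until_stable(uint32_t (*read_reg)(void), int tries)
	{
		while (tries--) {
			uint32_t first = read_reg();

			usleep(1500);	/* ~1-2 ms, as in the patch */
			if (first == read_reg())
				return true;
		}
		return false;		/* caller reports -ETIME */
	}

	static uint32_t fake_reg(void) { return 0x42; }	/* stand-in read */

	int main(void)
	{
		return poll_until_stable(fake_reg, 20) ? 0 : 1;
	}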
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 36b1ca73c2ed..a1443990d5c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -2361,11 +2361,15 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ if ((adev->gfx.mec_fw_version >= 0xb0) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ if ((adev->gfx.mec_fw_version >= 0xf) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 7dc67a22a7a0..8ddc4df06a1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1429,7 +1429,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
if ((adev->sdma.instance[0].fw_version >= 35) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 3bd44c24f692..51101b0aa2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -342,7 +342,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me > 1) {
- amdgpu_asic_flush_hdp(adev, ring);
+ amdgpu_hdp_flush(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
@@ -1348,12 +1348,14 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
if ((adev->sdma.instance[0].fw_version >= 76) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
if ((adev->sdma.instance[0].fw_version >= 34) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db6e41967f12..217040044987 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1356,7 +1356,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
if ((adev->sdma.instance[0].fw_version >= 21) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1389,7 +1390,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 3):
- if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
+ if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 326ecc8d37d2..2b81344dcd66 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index e0f139de7991..f7288372ee61 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -45,6 +45,7 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
+#include "vce_v1_0.h"
#include "uvd/uvd_4_0_d.h"
@@ -921,8 +922,6 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x3630, 0xfffffff0, 0x00000100,
};
-/* XXX: update when we support VCE */
-#if 0
/* tahiti, pitcairn, verde */
static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
{
@@ -940,13 +939,7 @@ static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
.codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
.codec_array = tahiti_video_codecs_encode_array,
};
-#else
-static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
-{
- .codec_count = 0,
- .codec_array = NULL,
-};
-#endif
+
/* oland and hainan don't support encode */
static const struct amdgpu_video_codecs hainan_video_codecs_encode =
{
@@ -1925,6 +1918,14 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
~VCEPLL_BYPASS_EN_MASK);
if (!evclk || !ecclk) {
+ /*
+ * On some chips, the PLL takes way too long to get out of
+ * sleep mode, causing a timeout waiting on CTLACK/CTLACK2.
+ * Leave the PLL running in bypass mode.
+ */
+ if (adev->pdev->device == 0x6780)
+ return 0;
+
/* Keep the Bypass mode, put PLL to sleep */
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
~VCEPLL_SLEEP_MASK);
@@ -2717,7 +2718,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block);
break;
case CHIP_OLAND:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
@@ -2735,7 +2736,6 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 1df00f8a2406..66f650f87243 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -96,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
si_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -112,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
@@ -127,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -175,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
return amdgpu_irq_init(adev);
}
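Editor's note: gmc_v6/7/8 and the iceland/si/tonga IH blocks all gain the same software-IH pattern in this patch: the handler re-queues the IV onto adev->irq.ih_soft and returns early when called from the hardware ring, and the heavyweight register reads run when the entry comes back around from the soft ring. A condensed, self-contained sketch of that control flow (the struct stubs and handle_fault_slow_path() mimic the kernel types for illustration only):

	#include <stdbool.h>
	#include <stdio.h>

	struct amdgpu_ih_ring { bool enabled; };
	struct amdgpu_iv_entry { struct amdgpu_ih_ring *ih; };
	struct amdgpu_device {
		struct { struct amdgpu_ih_ring ih, ih_soft; } irq;
	};

	/* Stub: the real helper copies num_dw dwords of the IV to ih_soft. */
	static void amdgpu_irq_delegate(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry,
					unsigned int num_dw)
	{
		(void)num_dw;			/* unused in this stub */
		entry->ih = &adev->irq.ih_soft;	/* pretend it was re-queued */
	}

	static int handle_fault_slow_path(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry)
	{
		(void)adev; (void)entry;
		puts("slow path, from ih_soft");	/* register reads here */
		return 0;
	}

	static int process_interrupt(struct amdgpu_device *adev,
				     struct amdgpu_iv_entry *entry)
	{
		/* First pass, from the hardware ring: defer and bail out. */
		if (adev->irq.ih_soft.enabled &&
		    entry->ih != &adev->irq.ih_soft) {
			amdgpu_irq_delegate(adev, entry, 4);
			return 1;
		}

		/* Second pass, from ih_soft: safe to do the slow work. */
		return handle_fault_slow_path(adev, entry);
	}

	int main(void)
	{
		struct amdgpu_device adev = {
			.irq = { .ih_soft = { .enabled = true } },
		};
		struct amdgpu_iv_entry entry = { .ih = &adev.irq.ih };

		if (process_interrupt(&adev, &entry))	  /* deferred */
			process_interrupt(&adev, &entry); /* replayed */
		return 0;
	}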
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbd4f8951cfa..561462a8332e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -582,45 +582,6 @@
#define DMA_PACKET_NOP 0xf
/* VCE */
-#define VCE_STATUS 0x20004
-#define VCE_VCPU_CNTL 0x20014
-#define VCE_CLK_EN (1 << 0)
-#define VCE_VCPU_CACHE_OFFSET0 0x20024
-#define VCE_VCPU_CACHE_SIZE0 0x20028
-#define VCE_VCPU_CACHE_OFFSET1 0x2002c
-#define VCE_VCPU_CACHE_SIZE1 0x20030
-#define VCE_VCPU_CACHE_OFFSET2 0x20034
-#define VCE_VCPU_CACHE_SIZE2 0x20038
-#define VCE_SOFT_RESET 0x20120
-#define VCE_ECPU_SOFT_RESET (1 << 0)
-#define VCE_FME_SOFT_RESET (1 << 2)
-#define VCE_RB_BASE_LO2 0x2016c
-#define VCE_RB_BASE_HI2 0x20170
-#define VCE_RB_SIZE2 0x20174
-#define VCE_RB_RPTR2 0x20178
-#define VCE_RB_WPTR2 0x2017c
-#define VCE_RB_BASE_LO 0x20180
-#define VCE_RB_BASE_HI 0x20184
-#define VCE_RB_SIZE 0x20188
-#define VCE_RB_RPTR 0x2018c
-#define VCE_RB_WPTR 0x20190
-#define VCE_CLOCK_GATING_A 0x202f8
-#define VCE_CLOCK_GATING_B 0x202fc
-#define VCE_UENC_CLOCK_GATING 0x205bc
-#define VCE_UENC_REG_CLOCK_GATING 0x205c0
-#define VCE_FW_REG_STATUS 0x20e10
-# define VCE_FW_REG_STATUS_BUSY (1 << 0)
-# define VCE_FW_REG_STATUS_PASS (1 << 3)
-# define VCE_FW_REG_STATUS_DONE (1 << 11)
-#define VCE_LMI_FW_START_KEYSEL 0x20e18
-#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
-#define VCE_LMI_CTRL2 0x20e74
-#define VCE_LMI_CTRL 0x20e98
-#define VCE_LMI_VM_CTRL 0x20ea0
-#define VCE_LMI_SWAP_CNTL 0x20eb4
-#define VCE_LMI_SWAP_CNTL1 0x20eb8
-#define VCE_LMI_CACHE_CTRL 0x20ef4
-
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
#define VCE_CMD_IB 0x00000002
@@ -629,7 +590,6 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
-
//#dce stupp
/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
#define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9785fada4fa7..42f5d9c0e3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -853,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
- /* CP hangs in IGT reloading test on RN, reset to WA */
- if (adev->asic_type == CHIP_RENOIR)
- return true;
-
if (amdgpu_gmc_need_reset_on_init(adev))
return true;
if (amdgpu_psp_tos_reload_needed(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 7d17ae56f901..ee8038df17e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 8dc32787d625..0f5b1719fda5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -711,6 +711,19 @@ static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
return die;
}
+static void umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid)
+{
+ if (did)
+ *did = MCA_IPID_2_DIE_ID(ipid);
+ if (ch)
+ *ch = MCA_IPID_2_UMC_CH(ipid);
+ if (umc_inst)
+ *umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ if (sid)
+ *sid = MCA_IPID_2_SOCKET_ID(ipid);
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -724,5 +737,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
.get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
+ .mca_ipid_parse = umc_v12_0_mca_ipid_parse,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
new file mode 100644
index 000000000000..9ae424618556
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ * Timur Kristóf <timur.kristof@gmail.com>
+ * Alexandre Demers <alexandre.f.demers@gmail.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "amdgpu_gart.h"
+#include "sid.h"
+#include "vce_v1_0.h"
+#include "vce/vce_1_0_d.h"
+#include "vce/vce_1_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+
+#define VCE_V1_0_FW_SIZE (256 * 1024)
+#define VCE_V1_0_STACK_SIZE (64 * 1024)
+#define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1))
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
+
+#define VCE_V1_0_GART_PAGE_START \
+ (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS)
+#define VCE_V1_0_GART_ADDR_START \
+ (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE)
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+struct vce_v1_0_fw_signature {
+ int32_t offset;
+ uint32_t length;
+ int32_t number;
+ struct {
+ uint32_t chip_id;
+ uint32_t keyselect;
+ uint32_t nonce[4];
+ uint32_t sigval[4];
+ } val[8];
+};
+
+/**
+ * vce_v1_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vce_v1_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_RPTR);
+ else
+ return RREG32(mmVCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vce_v1_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_WPTR);
+ else
+ return RREG32(mmVCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v1_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ else
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+}
+
+static int vce_v1_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_LMI_STATUS) & 0x337f)
+ return 0;
+
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v1_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ dev_err(adev->dev, "VCE not responding, trying to reset the ECPU\n");
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void vce_v1_0_init_cg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0x1e;
+ tmp &= ~0xe100e1;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0xff9ff000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+}
+
+/**
+ * vce_v1_0_load_fw_signature - load firmware signature into VCPU BO
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The VCE1 firmware validation mechanism needs a firmware signature.
+ * This function finds the signature appropriate for the current
+ * ASIC and writes that into the VCPU BO.
+ */
+static int vce_v1_0_load_fw_signature(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *hdr;
+ struct vce_v1_0_fw_signature *sign;
+ unsigned int ucode_offset;
+ uint32_t chip_id;
+ u32 *cpu_addr;
+ int i;
+
+ hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+ ucode_offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ cpu_addr = adev->vce.cpu_addr;
+
+ sign = (void *)adev->vce.fw->data + ucode_offset;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ default:
+		dev_err(adev->dev, "unsupported asic_type %#010x\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < le32_to_cpu(sign->number); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+ break;
+ }
+
+ if (i == le32_to_cpu(sign->number)) {
+		dev_err(adev->dev, "chip_id 0x%x for %s was not found in VCE firmware\n",
+ chip_id, amdgpu_asic_name[adev->asic_type]);
+ return -EINVAL;
+ }
+
+ cpu_addr += (256 - 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].nonce[0], 16);
+ cpu_addr[4] = cpu_to_le32(le32_to_cpu(sign->length) + 64);
+
+ memset_io(&cpu_addr[5], 0, 44);
+ memcpy_toio(&cpu_addr[16], &sign[1], hdr->ucode_size_bytes - sizeof(*sign));
+
+ cpu_addr += (le32_to_cpu(sign->length) + 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].sigval[0], 16);
+
+ adev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+
+ return 0;
+}
+
+static int vce_v1_0_wait_for_fw_validation(struct amdgpu_device *adev)
+{
+ int i;
+
+	dev_dbg(adev->dev, "VCE keyselect: %d\n", adev->vce.keyselect);
+ WREG32(mmVCE_LMI_FW_START_KEYSEL, adev->vce.keyselect);
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)
+ break;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)) {
+ dev_err(adev->dev, "VCE FW validation timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__PASS_MASK)) {
+ dev_err(adev->dev, "VCE FW validation failed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK))
+ break;
+ }
+
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK) {
+ dev_err(adev->dev, "VCE FW busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_mc_resume(struct amdgpu_device *adev)
+{
+ uint32_t offset;
+ uint32_t size;
+
+ /*
+ * When the keyselect is already set, don't perturb VCE FW.
+ * Validation seems to always fail the second time.
+ */
+ if (RREG32(mmVCE_LMI_FW_START_KEYSEL)) {
+ dev_dbg(adev->dev, "keyselect already set: 0x%x (on CPU: 0x%x)\n",
+ RREG32(mmVCE_LMI_FW_START_KEYSEL), adev->vce.keyselect);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+ return 0;
+ }
+
+ WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(mmVCE_CLOCK_GATING_B, 0);
+
+ WREG32_P(mmVCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);
+
+ WREG32(mmVCE_LMI_CTRL, 0x00398000);
+
+ WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+ WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+ WREG32(mmVCE_LMI_VM_CTRL, 0);
+
+ WREG32(mmVCE_VCPU_SCRATCH7, AMDGPU_MAX_VCE_HANDLES);
+
+ offset = adev->vce.gpu_addr + AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = VCE_V1_0_FW_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+ offset += size;
+ size = VCE_V1_0_STACK_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+ offset += size;
+ size = VCE_V1_0_DATA_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+
+ return vce_v1_0_wait_for_fw_validation(adev);
+}
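Editor's note: the three VCPU cache windows programmed above are packed back to back: firmware at the base, then the stack, then the per-handle data segment (the register values are gpu_addr-based and masked to 31 bits; the sketch below shows offsets relative to the firmware base). Worked example, assuming AMDGPU_MAX_VCE_HANDLES == 16 (an assumption; see amdgpu_vce.h):

	#include <stdio.h>

	int main(void)
	{
		unsigned int fw    = 256 * 1024;	/* VCE_V1_0_FW_SIZE    */
		unsigned int stack = 64 * 1024;		/* VCE_V1_0_STACK_SIZE */
		unsigned int data  = 7808 * (16 + 1);	/* VCE_V1_0_DATA_SIZE  */

		printf("VCPU_CACHE_OFFSET0 = 0x%06x, size %u\n", 0, fw);
		printf("VCPU_CACHE_OFFSET1 = 0x%06x, size %u\n", fw, stack);
		printf("VCPU_CACHE_OFFSET2 = 0x%06x, size %u\n",
		       fw + stack, data);
		return 0;
	}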
+
+/**
+ * vce_v1_0_is_idle() - Check idle status of VCE1 IP block
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ *
+ * Check whether VCE is busy according to VCE_STATUS.
+ * Also check whether the SRBM thinks VCE is busy, although
+ * SRBM_STATUS2.VCE_BUSY seems to be bogus because it
+ * appears to mirror the VCE_STATUS.VCPU_REPORT_FW_LOADED bit.
+ */
+static bool vce_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool busy =
+ (RREG32(mmVCE_STATUS) & (VCE_STATUS__JOB_BUSY_MASK | VCE_STATUS__UENC_BUSY_MASK)) ||
+ (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+
+ return !busy;
+}
+
+static int vce_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ unsigned int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ udelay(1);
+ if (vce_v1_0_is_idle(ip_block))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v1_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ WREG32_P(mmVCE_STATUS, 1, ~1);
+
+ r = vce_v1_0_mc_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO2, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+ ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ mdelay(100);
+
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ r = vce_v1_0_firmware_loaded(adev);
+
+ /* Clear VCE_STATUS, otherwise SRBM thinks VCE1 is busy. */
+ WREG32(mmVCE_STATUS, 0);
+
+ if (r) {
+ dev_err(adev->dev, "VCE not responding, giving up\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ip_block *ip_block;
+ int status;
+ int i;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+ if (!ip_block)
+ return -EINVAL;
+
+ if (vce_v1_0_lmi_clean(adev))
+ dev_warn(adev->dev, "VCE not idle\n");
+
+ if (vce_v1_0_wait_for_idle(ip_block))
+ dev_warn(adev->dev, "VCE busy: VCE_STATUS=0x%x, SRBM_STATUS2=0x%x\n",
+ RREG32(mmVCE_STATUS), RREG32(mmSRBM_STATUS2));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+ for (i = 0; i < 100; ++i) {
+ status = RREG32(mmVCE_LMI_STATUS);
+ if (status & 0x240)
+ break;
+ mdelay(1);
+ }
+
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ WREG32(mmVCE_STATUS, 0);
+
+ return 0;
+}
+
+static void vce_v1_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1ff000;
+ tmp |= 0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ } else {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp &= ~VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0x1ff000;
+ tmp &= ~0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
+static int vce_v1_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
+ adev->vce.num_rings = 2;
+
+ vce_v1_0_set_ring_funcs(adev);
+ vce_v1_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Due to various hardware limitations, VCE1 requires the
+ * VCPU BO to live in the low 32-bit address range.
+ * Ensure that the VCPU BO has a 32-bit GPU address,
+ * or return an error code when that isn't possible.
+ *
+ * To accommodate that, GART is placed in the low address range
+ * and some GART pages are reserved for mapping the VCPU BO,
+ * so that it gets a 32-bit address.
+ */
+static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev)
+{
+ u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo);
+ u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo);
+ u64 max_vcpu_bo_addr = 0xffffffff - bo_size;
+ u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE;
+ u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo);
+ u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;
+
+ /*
+ * Check if the VCPU BO already has a 32-bit address.
+	 * E.g. if the MC is configured to put VRAM in the low address range.
+ */
+ if (gpu_addr <= max_vcpu_bo_addr)
+ return 0;
+
+ /* Check if we can map the VCPU BO in GART to a 32-bit address. */
+ if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START,
+ num_pages, flags, adev->gart.ptr);
+ adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START;
+ if (adev->vce.gpu_addr > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ return 0;
+}
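Editor's note: VCE_V1_0_GART_PAGE_START places the mapping just past the GTT transfer windows. Assuming the usual amdgpu_ttm.h values (AMDGPU_GTT_MAX_TRANSFER_SIZE = 512, AMDGPU_GTT_NUM_TRANSFER_WINDOWS = 2, 4 KiB GPU pages; worth double-checking against the headers), the arithmetic works out as follows:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_start = 512UL * 2;	/* pages kept for windows */
		unsigned long addr_start = page_start * 4096;

		/* The VCPU BO lands 4 MiB into the GART aperture. */
		printf("GART_PAGE_START = %lu, GART_ADDR_START = %lu KiB\n",
		       page_start, addr_start / 1024);
		return 0;
	}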
+
+static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int r, i;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_sw_init(adev, VCE_V1_0_FW_SIZE +
+ VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ hw_prio, NULL);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_suspend(adev);
+ if (r)
+ return r;
+
+ return amdgpu_vce_sw_fini(adev);
+}
+
+/**
+ * vce_v1_0_hw_init - start and test VCE block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vce_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vce(adev, true);
+ else
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+ }
+
+ dev_info(adev->dev, "VCE initialized successfully.\n");
+
+ return 0;
+}
+
+static int vce_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ r = vce_v1_0_stop(ip_block->adev);
+ if (r)
+ return r;
+
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
+ return 0;
+}
+
+static int vce_v1_0_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
+ r = vce_v1_0_hw_fini(ip_block);
+ if (r) {
+		dev_err(adev->dev, "vce_v1_0_hw_fini() failed with error %i\n", r);
+ return r;
+ }
+
+ return amdgpu_vce_suspend(adev);
+}
+
+static int vce_v1_0_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ return vce_v1_0_hw_init(ip_block);
+}
+
+static int vce_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+ WREG32_P(mmVCE_SYS_INT_EN, val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ return 0;
+}
+
+static int vce_v1_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg(adev->dev, "IH: VCE\n");
+ switch (entry->src_data[0]) {
+ case 0:
+ case 1:
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
+ break;
+ default:
+ dev_err(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ vce_v1_0_init_cg(adev);
+ vce_v1_0_enable_mgcg(adev, state == AMD_CG_STATE_GATE);
+
+ return 0;
+}
+
+static int vce_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /*
+ * This doesn't actually powergate the VCE block.
+ * That's done in the dpm code via the SMC. This
+ * just re-inits the block as necessary. The actual
+ * gating still happens in the dpm code. We should
+ * revisit this when there is a cleaner line between
+	 * the SMC and the HW blocks.
+ */
+ if (state == AMD_PG_STATE_GATE)
+ return vce_v1_0_stop(adev);
+ else
+ return vce_v1_0_start(adev);
+}
+
+static const struct amd_ip_funcs vce_v1_0_ip_funcs = {
+ .name = "vce_v1_0",
+ .early_init = vce_v1_0_early_init,
+ .sw_init = vce_v1_0_sw_init,
+ .sw_fini = vce_v1_0_sw_fini,
+ .hw_init = vce_v1_0_hw_init,
+ .hw_fini = vce_v1_0_hw_fini,
+ .suspend = vce_v1_0_suspend,
+ .resume = vce_v1_0_resume,
+ .is_idle = vce_v1_0_is_idle,
+ .wait_for_idle = vce_v1_0_wait_for_idle,
+ .set_clockgating_state = vce_v1_0_set_clockgating_state,
+ .set_powergating_state = vce_v1_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs vce_v1_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .get_rptr = vce_v1_0_ring_get_rptr,
+ .get_wptr = vce_v1_0_ring_get_wptr,
+ .set_wptr = vce_v1_0_ring_set_wptr,
+ .parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
+};
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v1_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs vce_v1_0_irq_funcs = {
+ .set = vce_v1_0_set_interrupt_state,
+ .process = vce_v1_0_process_interrupt,
+};
+
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->vce.irq.num_types = 1;
+ adev->vce.irq.funcs = &vce_v1_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version vce_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v1_0_ip_funcs,
+};
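+
+/*
+ * Illustrative only (an assumption, not part of this patch's plumbing):
+ * a SoC init path for a VCE 1.0 part would typically register this block
+ * alongside the other IP blocks, e.g.:
+ *
+ * amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block);
+ */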
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
new file mode 100644
index 000000000000..206e7bec897f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCE_V1_0_H__
+#define __VCE_V1_0_H__
+
+extern const struct amdgpu_ip_block_version vce_v1_0_ip_block;
+
+#endif /* __VCE_V1_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index bee3e904a6bc..8ea8a6193492 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -407,6 +407,11 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
adev->vce.num_rings = 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 708123899c41..719e9643c43d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -399,6 +399,7 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
@@ -407,6 +408,10 @@ static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
adev->vce.num_rings = 3;
vce_v3_0_set_ring_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 335bda64ff5b..2d64002bed61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -410,6 +410,11 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
adev->vce.num_rings = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index eacf4e93ba2f..cb7123ec1a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- if (amdgpu_dpm_reset_vcn_is_supported(adev))
+ if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 714350cabf2f..8bd457dea4cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 12):
- if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
+ if ((adev->psp.sos.fw_version >= 0x00450025) &&
+ amdgpu_dpm_reset_vcn_is_supported(adev) &&
+ !amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 0f0719528bcc..22925df6a791 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -2826,7 +2826,7 @@ retry:
static int runtime_disable(struct kfd_process *p)
{
- int i = 0, ret;
+ int i = 0, ret = 0;
bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
@@ -2863,6 +2863,7 @@ static int runtime_disable(struct kfd_process *p)
/* disable ttmp setup */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ int last_err = 0;
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
pdd->spi_dbg_override =
@@ -2872,14 +2873,17 @@ static int runtime_disable(struct kfd_process *p)
pdd->dev->vm_info.last_vmid_kfd);
if (!pdd->dev->kfd->shared_resources.enable_mes)
- debug_refresh_runlist(pdd->dev->dqm);
+ last_err = debug_refresh_runlist(pdd->dev->dqm);
else
- kfd_dbg_set_mes_debug_mode(pdd,
+ last_err = kfd_dbg_set_mes_debug_mode(pdd,
!kfd_dbg_has_cwsr_workaround(pdd->dev));
+
+ if (last_err)
+ ret = last_err;
}
}
- return 0;
+ return ret;
}
static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 6c5c7c1bf5ed..d7a2e7178ea9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
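+
+ /*
+ * With MES enabled, suspend all queues up front so they can be
+ * deactivated and removed below without racing the scheduler.
+ */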
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
+ retval = suspend_all_queues_mes(dqm);
+ if (retval) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+ }
+
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
*/
@@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- int err;
-
- err = remove_queue_mes(dqm, q, qpd);
- if (err) {
+ retval = remove_queue_mes(dqm, q, qpd);
+ if (retval) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- retval = err;
+ goto out;
}
}
}
- pdd->last_evict_timestamp = get_jiffies_64();
- if (!dqm->dev->kfd->shared_resources.enable_mes)
+
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
+ } else {
+ retval = resume_all_queues_mes(dqm);
+ if (retval)
+ dev_err(dev, "Resuming all queues failed");
+ }
out:
dqm_unlock(dqm);
@@ -1884,6 +1897,8 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ int ret = 0;
+
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
@@ -1891,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD, false);
else
- remove_all_kfd_queues_mes(dqm);
+ ret = remove_all_kfd_queues_mes(dqm);
dqm->sched_running = false;
@@ -1907,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
- return 0;
+ return ret;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
@@ -2078,7 +2094,8 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
while (*fence_addr != fence_value) {
/* Fatal err detected, this response won't come */
- if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
+ if (amdgpu_amdkfd_is_fed(dqm->dev->adev) ||
+ amdgpu_in_reset(dqm->dev->adev))
return -EIO;
if (time_after(jiffies, end_jiffies)) {
@@ -3098,61 +3115,17 @@ out:
return ret;
}
-static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct device *dev = dqm->dev->adev->dev;
- int ret = 0;
-
- /* Check if process is already evicted */
- dqm_lock(dqm);
- if (qpd->evicted) {
- /* Increment the evicted count to make sure the
- * process stays evicted before its terminated.
- */
- qpd->evicted++;
- dqm_unlock(dqm);
- goto out;
- }
- dqm_unlock(dqm);
-
- ret = suspend_all_queues_mes(dqm);
- if (ret) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
-
- ret = dqm->ops.evict_process_queues(dqm, qpd);
- if (ret) {
- dev_err(dev, "Evicting process queues failed");
- goto out;
- }
-
- ret = resume_all_queues_mes(dqm);
- if (ret)
- dev_err(dev, "Resuming all queues failed");
-
-out:
- return ret;
-}
-
int kfd_evict_process_device(struct kfd_process_device *pdd)
{
struct device_queue_manager *dqm;
struct kfd_process *p;
- int ret = 0;
p = pdd->process;
dqm = pdd->dev->dqm;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
- if (dqm->dev->kfd->shared_resources.enable_mes)
- ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
- else
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
-
- return ret;
+ return dqm->ops.evict_process_queues(dqm, &pdd->qpd);
}
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 82905f3e54dd..5a190dd6be4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -748,16 +748,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
- /*
- * If id is valid but slot is not signaled, GPU may signal the same event twice
- * before driver have chance to process the first interrupt, then signal slot is
- * auto-reset after set_event wakeup the user space, just drop the second event as
- * the application only need wakeup once.
- */
- if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
- partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
- goto out_unlock;
-
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -786,7 +776,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
-out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 4ceb251312a6..d76fb61869c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -28,6 +28,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
/*
* GFX9 SQ Interrupts
@@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
kfd_signal_poison_consumed_event(dev, pasid);
- event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
+ if (amdgpu_uniras_enabled(dev->adev))
+ event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION);
+ else
+ event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
RAS_EVENT_LOG(dev->adev, event_id,
"poison is consumed by client %d, kick off gpu reset flow\n", client_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 59a5a3fea65d..46c84fc60af1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 2eebf67f9c2c..2b7fd442d29c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "kfd_priv.h"
#include "kfd_svm.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ddfe30c13e9d..a085faac9fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1083,7 +1083,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
* for auto suspend
*/
if (pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
@@ -1162,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work)
release_work);
struct dma_fence *ef;
- kfd_process_dequeue_from_all_devices(p);
- pqm_uninit(&p->pqm);
-
/*
* If GPU in reset, user queues may still running, wait for reset complete.
*/
@@ -1226,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ /*
+ * Dequeue and destroy user queues. It is not safe for the GPU to
+ * access system memory after the mmu release notifier callback
+ * returns, because exit_mmap frees the process memory afterwards.
+ */
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index a65c67cf56ff..f1e7583650c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
}
- if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
- pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
properties->ctx_save_restore_area_size,
topo_dev->node_props.cwsr_size);
err = -EINVAL;
goto out_err_unreserve;
}
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
if (!topo_dev)
return -EINVAL;
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d72411c3379..97c2270f278f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1698,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range = NULL;
+ struct amdgpu_hmm_range *range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
@@ -1737,9 +1737,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
WRITE_ONCE(p->svms.faulting_task, current);
- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner,
- &hmm_range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (likely(range))
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, range);
+ else
+ r = -ENOMEM;
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r)
pr_debug("failed %d to get svm range pages\n", r);
@@ -1750,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
- hmm_range->hmm_pfns);
+ range->hmm_range.hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
}
@@ -1758,14 +1761,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_lock(prange);
/* Free backing memory of hmm_range if it was initialized
- * Overrride return value to TRY AGAIN only if prior returns
+ * Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
+ if (range && !amdgpu_hmm_range_valid(range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
+ /* Free the hmm range */
+ amdgpu_hmm_range_free(range);
+
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
@@ -3687,6 +3693,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
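+ /* Without XNACK, faults cannot restore mappings, so force a mapping
+ * update when any ranges were queued for remapping.
+ */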
+ update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
+
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 01c7a4877904..a63dfc95b602 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5c98746eb72d..811636af14ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -530,7 +530,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
- dev->gpu->xcp ?
+ dev->gpu->xcp &&
+ (dev->gpu->xcp->xcp_mgr->mode !=
+ AMDGPU_SPX_PARTITION_MODE) ?
dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 7329b8cc2576..8e949fe77312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -39,7 +39,8 @@ AMDGPUDM = \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
amdgpu_dm_quirks.o \
- amdgpu_dm_wb.o
+ amdgpu_dm_wb.o \
+ amdgpu_dm_colorop.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0d03e324d5b9..740711ac1037 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
- struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);
@@ -2085,8 +2085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
- adev->dm.restore_backlight = true;
-
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
@@ -3394,6 +3392,67 @@ static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks
+ * @adev: amdgpu device pointer
+ *
+ * Iterates through all DC links and dumps information about local and remote
+ * (MST) sinks. Should be called after connector detection is complete to see
+ * the final state of all links.
+ */
+static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct drm_device *dev = adev_to_drm(adev);
+ int li;
+
+ if (!dc)
+ return;
+
+ for (li = 0; li < dc->link_count; li++) {
+ struct dc_link *l = dc->links[li];
+ const char *name = NULL;
+ int rs;
+
+ if (!l)
+ continue;
+ if (l->local_sink && l->local_sink->edid_caps.display_name[0])
+ name = l->local_sink->edid_caps.display_name;
+ else
+ name = "n/a";
+
+ drm_dbg_kms(dev,
+ "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n",
+ li,
+ l->local_sink,
+ l->type,
+ l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
+ l->sink_count,
+ name,
+ l->dpcd_caps.is_mst_capable,
+ l->mst_stream_alloc_table.stream_count);
+
+ /* Dump remote (MST) sinks if any */
+ for (rs = 0; rs < l->sink_count; rs++) {
+ struct dc_sink *rsink = l->remote_sinks[rs];
+ const char *rname = NULL;
+
+ if (!rsink)
+ continue;
+ if (rsink->edid_caps.display_name[0])
+ rname = rsink->edid_caps.display_name;
+ else
+ rname = "n/a";
+ drm_dbg_kms(dev,
+ " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
+ li, rs,
+ rsink,
+ rsink->sink_signal,
+ rname);
+ }
+ }
+}
+
static int dm_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3442,7 +3501,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
dc_resume(dm->dc);
- adev->dm.restore_backlight = true;
amdgpu_dm_irq_resume_early(adev);
@@ -3566,6 +3624,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool init = false;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3575,10 +3634,23 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
aconnector->mst_root)
continue;
- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+ init = !aconnector->mst_mgr.mst_primary;
+ }
+ if (init)
+ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link, false);
+ else
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -3789,7 +3861,9 @@ void amdgpu_dm_update_connector_after_detect(
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- guard(mutex)(&dev->mode_config.mutex);
+ /* When polling, DRM has already locked the mutex for us. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3852,6 +3926,101 @@ void amdgpu_dm_update_connector_after_detect(
}
update_subconnector_property(aconnector);
+
+ /* When polling, the mutex will be unlocked for us by DRM. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+ if (!sink1 || !sink2)
+ return false;
+ if (sink1->sink_signal != sink2->sink_signal)
+ return false;
+
+ if (sink1->dc_edid.length != sink2->dc_edid.length)
+ return false;
+
+ if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+ sink1->dc_edid.length) != 0)
+ return false;
+ return true;
+}
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay_ms) and a spontaneous
+ * RX HPD toggle (<hdmi_hpd_debounce_delay_ms).
+ *
+ * If the toggle is less than this delay, the driver compares sink capabilities
+ * and permits a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+ struct amdgpu_dm_connector *aconnector =
+ container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+ hdmi_hpd_debounce_work);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
+ bool fake_reconnect = false;
+ bool reallow_idle = false;
+ bool ret = false;
+ guard(mutex)(&aconnector->hpd_lock);
+
+ /* Re-detect the display */
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reallow_idle = true;
+ }
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
+
+ if (ret) {
+ /* Apply workaround delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ /* Compare sinks to determine if this was a spontaneous HPD toggle */
+ if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+ /*
+ * Sinks match - this was a spontaneous HDMI HPD toggle.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+ fake_reconnect = true;
+ }
+
+ /* Update connector state */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ /* Only notify OS if sink actually changed */
+ if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+ /* Release the cached sink reference */
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (reallow_idle && dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, true);
+ }
}
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3863,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
+ bool debounce_required = false;
if (adev->dm.disable_hpd_irq)
return;
@@ -3885,6 +4055,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
+ /*
+ * Check for HDMI disconnect with debounce enabled.
+ */
+ debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+ dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+ new_connection_type == dc_connection_none &&
+ aconnector->dc_link->local_sink != NULL);
+
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3894,7 +4072,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else if (debounce_required) {
+ /*
+ * HDMI disconnect detected - schedule delayed work instead of
+ * processing immediately. This lets us distinguish spurious
+ * HDMI HPD toggles from physical unplugs.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+ aconnector->hdmi_hpd_debounce_delay_ms);
+
+ /* Cache the current sink for later comparison */
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_retain(aconnector->hdmi_prev_sink);
+
+ /* Schedule delayed detection. */
+ if (mod_delayed_work(system_wq,
+ &aconnector->hdmi_hpd_debounce_work,
+ msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+ drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
} else {
+ /* If the HDMI HPD debounce work is still pending, exit early */
+ if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+ return;
+
scoped_guard(mutex, &adev->dm.dc_lock) {
dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -4920,6 +5125,21 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector->bl_idx != bl_idx)
+ continue;
+
+ /* if connector is off, save the brightness for next time it's on */
+ if (!aconnector->base.encoder) {
+ dm->brightness[bl_idx] = user_brightness;
+ dm->actual_brightness[bl_idx] = 0;
+ return;
+ }
+ }
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = &dm->backlight_caps[bl_idx];
@@ -5136,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
static void setup_backlight_device(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link = aconnector->dc_link;
int bl_idx = dm->num_of_edps;
@@ -5155,6 +5376,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
dm->num_of_edps++;
update_connector_ext_caps(aconnector);
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+
+ /* Only offer the ABM property on non-OLED panels when the user hasn't disabled ABM via module parameter */
+ if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ drm_object_attach_property(&aconnector->base.base,
+ dm->adev->mode_info.abm_level_property,
+ ABM_SYSFS_CONTROL);
}
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -5410,6 +5638,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5796,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB;
+ /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */
+ if (plane_state->state && plane_state->state->plane_color_pipeline)
+ return 0;
+
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
@@ -7148,29 +7386,117 @@ finish:
return stream;
}
+/**
+ * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display
+ * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link)
+ * @force: if true, force polling even when DAC load detection was used
+ *
+ * Used for connectors that don't support HPD (hotplug detection) to
+ * periodically check whether the connector is connected to a display.
+ *
+ * When connection was determined via DAC load detection, we avoid
+ * re-running it on normal polls to prevent visible glitches, unless
+ * @force is set.
+ *
+ * Return: The probed connector status (connected/disconnected/unknown).
+ */
static enum drm_connector_status
-amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
{
- bool connected;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc_link *link = aconnector->dc_link;
+ enum dc_connection_type conn_type = dc_connection_none;
+ enum drm_connector_status status = connector_status_disconnected;
- /*
- * Notes:
- * 1. This interface is NOT called in context of HPD irq.
- * 2. This interface *is called* in context of user-mode ioctl. Which
- * makes it a bad place for *any* MST-related activity.
+ /* When the connection was determined using DAC load detection,
+ * do NOT poll the connector to detect disconnect, because that
+ * would run DAC load detection again, which can cause visible
+ * glitches.
+ *
+ * Only allow polling such a connector again when forcing.
*/
+ if (!force && link->local_sink && link->type == dc_connection_dac_load)
+ return connector->status;
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
- !aconnector->fake_enable)
- connected = (aconnector->dc_sink != NULL);
- else
- connected = (aconnector->base.force == DRM_FORCE_ON ||
- aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
+ conn_type != dc_connection_none) {
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Only call full link detection when a sink isn't created yet,
+ * ie. just when the display is plugged in, otherwise we risk flickering.
+ */
+ if (link->local_sink ||
+ dc_link_detect(link, DETECT_REASON_HPD))
+ status = connector_status_connected;
+
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+ if (connector->status != status) {
+ if (status == connector_status_disconnected) {
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ link->local_sink = NULL;
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ }
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ mutex_unlock(&aconnector->hpd_lock);
+ return status;
+}
+
+/**
+ * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
+ *
+ * A connector is considered connected when it has a sink that is not NULL.
+ * For connectors that support HPD (hotplug detection), the connection is
+ * handled in the HPD interrupt.
+ * For connectors that may not support HPD, such as analog connectors,
+ * DRM will call this function repeatedly to poll them.
+ *
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl, which
+ * makes it a bad place for *any* MST-related activity.
+ *
+ * @connector: The DRM connector we are checking. We convert it to
+ * amdgpu_dm_connector so we can read the DC link and state.
+ * @force: If true, do a full detect again. This is used even when
+ * a lighter check would normally be used to avoid flicker.
+ *
+ * Return: The connector status (connected, disconnected, or unknown).
+ *
+ */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
update_subconnector_property(aconnector);
- return (connected ? connector_status_connected :
+ if (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL)
+ return connector_status_connected;
+ else if (aconnector->base.force == DRM_FORCE_OFF)
+ return connector_status_disconnected;
+
+ /* Only poll connectors that support analog signals, and only while
+ * they are disconnected or connected to an analog display.
+ */
+ if (drm_kms_helper_is_poll_worker() &&
+ dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
+ (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
+ return amdgpu_dm_connector_poll(aconnector, force);
+
+ return (aconnector->dc_sink ? connector_status_connected :
connector_status_disconnected);
}
@@ -7221,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ switch (val) {
+ case ABM_SYSFS_CONTROL:
+ dm_new_state->abm_sysfs_forbidden = false;
+ break;
+ case ABM_LEVEL_OFF:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ break;
+ default:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = val;
+ }
+ ret = 0;
}
return ret;
@@ -7263,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ if (!dm_state->abm_sysfs_forbidden)
+ *val = ABM_SYSFS_CONTROL;
+ else
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
+ ret = 0;
}
return ret;
@@ -7315,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device,
return -EINVAL;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- to_dm_connector_state(connector->state)->abm_level = val ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
+ ret = -EBUSY;
+ else
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
drm_kms_helper_hotplug_event(dev);
return count;
@@ -7383,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+ /* Cancel and flush any pending HDMI HPD debounce work */
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -8033,7 +8393,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
"mode %dx%d@%dHz is not native, enabling scaling\n",
adjusted_mode->hdisplay, adjusted_mode->vdisplay,
drm_mode_vrefresh(adjusted_mode));
- dm_new_connector_state->scaling = RMX_FULL;
+ dm_new_connector_state->scaling = RMX_ASPECT;
}
return 0;
}
@@ -8158,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
return 0;
}
-static int to_drm_connector_type(enum signal_type st)
+static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
{
switch (st) {
case SIGNAL_TYPE_HDMI_TYPE_A:
@@ -8174,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st)
return DRM_MODE_CONNECTOR_DisplayPort;
case SIGNAL_TYPE_DVI_DUAL_LINK:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
+ return DRM_MODE_CONNECTOR_DVII;
+
return DRM_MODE_CONNECTOR_DVID;
case SIGNAL_TYPE_VIRTUAL:
return DRM_MODE_CONNECTOR_VIRTUAL;
@@ -8225,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
+ const char *name,
int hdisplay, int vdisplay)
{
struct drm_device *dev = encoder->dev;
@@ -8247,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
}
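+
+/* Fallback modes offered when a connected sink provides no EDID, e.g. an
+ * analog monitor found via DAC load detection; see get_modes below.
+ */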
+static const struct amdgpu_dm_mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+} common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+};
+
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -8257,23 +8639,6 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
to_amdgpu_dm_connector(connector);
int i;
int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
(connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
@@ -8474,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
+ if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link)
+ return;
+
+ if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
+ return;
+
+ if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog)
+ return;
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -8483,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
struct drm_encoder *encoder;
const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
- struct dc_link_settings *verified_link_cap =
- &amdgpu_dm_connector->dc_link->verified_link_cap;
- const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+ struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap;
+ const struct dc *dc = dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
@@ -8497,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
+
+ if (amdgpu_dm_connector->dc_sink &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog &&
+ dc_connector_supports_analog(dc_link->link_id.id)) {
+ /* Analog monitor connected by DAC load detection.
+ * Add common modes. It will be up to the user to select one that works.
+ */
+ for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
+ amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
+ connector, common_modes[i].w, common_modes[i].h);
+ }
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
@@ -8544,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
+ aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+
/*
* configure support HPD hot plug connector_>polled default value is 0
* which means HPD hot plug not supported
@@ -8565,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_VGA:
+ aconnector->base.polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ break;
default:
break;
}
@@ -8766,7 +9161,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
goto out_free;
}
- connector_type = to_drm_connector_type(link->connector_signal);
+ connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
res = drm_connector_init_with_ddc(
dm->ddev,
@@ -9969,6 +10364,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
bool mode_set_reset_required = false;
u32 i;
struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+ bool set_backlight_level = false;
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -10088,6 +10484,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
+ set_backlight_level = true;
} else if (modereset_required(new_crtc_state)) {
drm_dbg_atomic(dev,
"Atomic commit: RESET. crtc id %d:[%p]\n",
@@ -10144,16 +10541,13 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
* to fix a flicker issue.
* It will cause the dm->actual_brightness is not the current panel brightness
* level. (the dm->brightness is the correct panel level)
- * So we set the backlight level with dm->brightness value after initial
- * set mode. Use restore_backlight flag to avoid setting backlight level
- * for every subsequent mode set.
+ * So we set the backlight level to the dm->brightness value after a mode set.
*/
- if (dm->restore_backlight) {
+ if (set_backlight_level) {
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i])
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
- dm->restore_backlight = false;
}
}
@@ -10523,7 +10917,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
- dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL);
if (!dummy_updates) {
drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
@@ -12450,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int j = state->num_private_objs-1;
dm_atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
@@ -12461,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
state->private_objs[j];
state->private_objs[j].ptr = NULL;
- state->private_objs[j].state = NULL;
+ state->private_objs[j].state_to_destroy = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 009f206226f0..ef97cede9926 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -631,13 +632,6 @@ struct amdgpu_display_manager {
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
/**
- * @restore_backlight:
- *
- * Flag to indicate whether to restore backlight after modeset.
- */
- bool restore_backlight;
-
- /**
* @aux_hpd_discon_quirk:
*
* quirk for hpd discon while aux is on-going.
@@ -826,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+ /* HDMI HPD debounce support */
+ unsigned int hdmi_hpd_debounce_delay_ms;
+ struct delayed_work hdmi_hpd_debounce_work;
+ struct dc_sink *hdmi_prev_sink;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -1000,6 +999,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
bool update_hdcp;
+ bool abm_sysfs_forbidden;
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a4ac6d442278..1dcc79b35225 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -26,12 +26,39 @@
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
/**
* DOC: overview
*
+ * We have three types of color management in the AMD display driver.
+ * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties
+ * 2. AMD driver private color management on &drm_plane and &drm_crtc
+ * 3. AMD plane color pipeline
+ *
+ * The CRTC properties are the original color management. When they were
+ * implemented, per-plane color management was not a thing yet. Because
+ * of that we could get away with plumbing the DEGAMMA and CTM
+ * properties to pre-blending HW functions. This is incompatible with
+ * per-plane color management, such as via the AMD private properties or
+ * the new drm_plane color pipeline. The only compatible CRTC property
+ * with per-plane color management is the GAMMA property as it is
+ * applied post-blending.
+ *
+ * The AMD driver private color management properties are only exposed
+ * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They
+ * are temporary building blocks on the path to full-fledged &drm_plane
+ * and &drm_crtc color pipelines and lay the driver's groundwork for the
+ * color pipelines.
+ *
+ * The AMD plane color pipeline describes AMD's &drm_colorops via the
+ * &drm_plane's COLOR_PIPELINE property.
+ *
+ * drm_crtc Properties
+ * -------------------
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -42,36 +69,93 @@
* - Surface regamma LUT (normalized)
* - Output CSC (normalized)
*
- * But these aren't a direct mapping to DRM color properties. The current DRM
- * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
- * is essentially giving:
+ * But these aren't a direct mapping to DRM color properties. The
+ * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma
+ * while our hardware is essentially giving:
*
* Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
*
- * The input gamma LUT block isn't really applicable here since it operates
- * on the actual input data itself rather than the HW fp representation. The
- * input and output CSC blocks are technically available to use as part of
- * the DC interface but are typically used internally by DC for conversions
- * between color spaces. These could be blended together with user
- * adjustments in the future but for now these should remain untouched.
+ * The input gamma LUT block isn't really applicable here since it
+ * operates on the actual input data itself rather than the HW fp
+ * representation. The input and output CSC blocks are technically
+ * available to use as part of the DC interface but are typically used
+ * internally by DC for conversions between color spaces. These could be
+ * blended together with user adjustments in the future but for now
+ * these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't
+ * actually support any CRTC props with correct blending with multiple
+ * planes - but we can still support CRTC color management properties in
+ * DM in most single plane cases correctly with clever management of the
+ * DC interface in DM.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when
+ * their respective property is set to NULL. A linear DGM/RGM LUT should
+ * also be considered as putting the respective block into bypass mode.
+ *
+ * This means that the following configuration is assumed to be the
+ * default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC
+ * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ *
+ * AMD Private Color Management on drm_plane
+ * -----------------------------------------
+ *
+ * The AMD private color management properties on a &drm_plane are:
*
- * The pipe blending also happens after these blocks so we don't actually
- * support any CRTC props with correct blending with multiple planes - but we
- * can still support CRTC color management properties in DM in most single
- * plane cases correctly with clever management of the DC interface in DM.
+ * - AMD_PLANE_DEGAMMA_LUT
+ * - AMD_PLANE_DEGAMMA_LUT_SIZE
+ * - AMD_PLANE_DEGAMMA_TF
+ * - AMD_PLANE_HDR_MULT
+ * - AMD_PLANE_CTM
+ * - AMD_PLANE_SHAPER_LUT
+ * - AMD_PLANE_SHAPER_LUT_SIZE
+ * - AMD_PLANE_SHAPER_TF
+ * - AMD_PLANE_LUT3D
+ * - AMD_PLANE_LUT3D_SIZE
+ * - AMD_PLANE_BLEND_LUT
+ * - AMD_PLANE_BLEND_LUT_SIZE
+ * - AMD_PLANE_BLEND_TF
*
- * As per DRM documentation, blocks should be in hardware bypass when their
- * respective property is set to NULL. A linear DGM/RGM LUT should also
- * considered as putting the respective block into bypass mode.
+ * The AMD private color management property on a &drm_crtc is:
*
- * This means that the following
- * configuration is assumed to be the default:
+ * - AMD_CRTC_REGAMMA_TF
+ *
+ * Use of these properties is discouraged.
+ *
+ * AMD plane color pipeline
+ * ------------------------
+ *
+ * The AMD &drm_plane color pipeline is advertised for DCN generations
+ * 3.0 and newer. It exposes these elements in this order:
+ *
+ * 1. 1D curve colorop
+ * 2. Multiplier
+ * 3. 3x4 CTM
+ * 4. 1D curve colorop
+ * 5. 1D LUT
+ * 6. 3D LUT
+ * 7. 1D curve colorop
+ * 8. 1D LUT
+ *
+ * The multiplier (#2) is a single factor applied uniformly to all
+ * channels.
+ *
+ * The 3x4 CTM (#3) is a simple 3x4 matrix.
+ *
+ * #1 and #7 are non-linear to linear curves; #4 is a linear to
+ * non-linear curve. They support the sRGB, PQ, BT.709/BT.2020, and
+ * gamma 2.2 EOTFs or their inverses.
+ *
+ * The 1D LUTs (#5 and #8) are plain 4096-entry LUTs.
+ *
+ * The 3D LUT (#6) is a tetrahedrally interpolated 17x17x17 LUT.
*
- * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
- * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*/
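+/*
+ * Illustrative sketch (editorial, not part of the driver): DM consumes
+ * an enabled pipeline by walking the singly-linked colorop list hanging
+ * off the plane state, roughly:
+ *
+ *	struct drm_colorop *colorop;
+ *
+ *	for (colorop = plane_state->color_pipeline; colorop;
+ *	     colorop = colorop->next)
+ *		program_element(colorop->type);
+ *
+ * where program_element() is a placeholder for the per-type handling
+ * done in amdgpu_dm_plane_set_colorop_properties() below.
+ */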
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
@@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
}
/**
+ * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
+static const struct drm_color_lut32 *
+__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
+{
+ *size = blob ? drm_color_lut32_size(blob) : 0;
+ return blob ? (struct drm_color_lut32 *)blob->data : NULL;
+}
+
+/**
* __is_lut_linear - check if the given lut is a linear mapping of values
* @lut: given lut to check values
* @size: lut size
@@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
/**
+ * __drm_lut32_to_dc_gamma - convert a drm_color_lut32 table to dc_gamma.
+ * @lut: DRM lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ *
+ * Unlike the legacy variant, this always converts MAX_COLOR_LUT_ENTRIES
+ * entries from the 32-bit LUT; there is no legacy mode.
+ */
+static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
+{
+ int i;
+
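+	/*
+	 * Normalize each 32-bit channel to a [0, 1] fixed-point value,
+	 * e.g. a LUT entry of 0xFFFFFFFF maps to exactly 1.0.
+	 */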
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE);
+ }
+}
+
+/**
* __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
* @ctm: DRM color transformation matrix
* @matrix: DC CSC float matrix
@@ -566,6 +683,63 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
+/**
+ * __set_output_tf_32 - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 on success, -ENOMEM on failure.
+ */
+static int __set_output_tf_32(struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size,
+ bool has_rom)
+{
+ struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
+ bool res;
+
+ cal_buffer.buffer_index = -1;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ if (func->tf == TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * Color module doesn't like calculating regamma params
+ * on top of a linear input. But degamma params can be used
+ * instead to simulate this.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, gamma != NULL);
+ } else {
+ /*
+		 * Assume sRGB. Unlike the legacy path, the 32-bit LUT is
+		 * always treated as a full CS_TFM_1D table.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
+ has_rom, NULL, &cal_buffer);
+ }
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
@@ -638,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
return res ? 0 : -ENOMEM;
}
+/**
+ * __set_input_tf_32 - calculates the input transfer function based on expected
+ * input space.
+ * @caps: dc color capabilities
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 on success, -ENOMEM on failure.
+ */
+static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size)
+{
+ struct dc_gamma *gamma = NULL;
+ bool res;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
@@ -667,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
}
}
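+/*
+ * Editorial note: DC's predefined transfer functions are direction
+ * agnostic - whether the decode (degamma) or encode (regamma) variant
+ * is applied depends on the block being programmed - so a curve and its
+ * inverse both map to a single DC value below. BT.2020 reuses the
+ * BT.709 curve shape, hence the shared mapping.
+ */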
+static enum dc_transfer_func_predefined
+amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
+{
+ switch (tf) {
+ case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ case DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case DRM_COLOROP_1D_CURVE_GAMMA22:
+ case DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ return TRANSFER_FUNCTION_GAMMA22;
+ default:
+ return TRANSFER_FUNCTION_LINEAR;
+ }
+}
+
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
const struct drm_color_lut lut,
int bit_precision)
@@ -720,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
+static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
+ const struct drm_color_lut32 lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+		/*
+		 * The 3D LUT RGB values are distributed across the four
+		 * arrays lut0-3, where lut0 holds 1229 entries and the
+		 * others hold 1228. The bit depth supported per 3D LUT
+		 * channel is 12-bit, but DC also supports 10-bit.
+		 *
+		 * TODO: improve the color pipeline API to let userspace
+		 * set the bit depth and 3D LUT size/stride, as specified
+		 * by VA-API.
+		 */
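+		/* e.g. 17x17x17: lut3d_size = 4913 = 1229 + 3 * 1228 */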
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+}
+
/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
@@ -1178,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
}
static int
+__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
+ struct drm_colorop_state *colorop_state)
+{
+ struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;
+ struct drm_colorop *colorop = colorop_state->colorop;
+ struct drm_device *drm = colorop->dev;
+
+ if (colorop->type != DRM_COLOROP_1D_CURVE)
+ return -EINVAL;
+
+ if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
+ return -EINVAL;
+
+ if (colorop_state->bypass) {
+ tf->type = TF_TYPE_BYPASS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id);
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ int i = 0;
+
+ old_colorop = colorop;
+
+ /* 1st op: 1d curve - degamma */
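+	/*
+	 * The colorop pointer only identifies the object; its new state
+	 * must be looked up in the atomic state, since the state is what
+	 * carries the bypass flag and 1D curve type for this commit.
+	 */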
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (!colorop_state)
+ return -EINVAL;
+
+ return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state);
+}
+
+static int
+__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_property_blob *blob;
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ int i = 0;
+
+ /* 3x4 matrix */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
+ drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
+ blob = colorop_state->data;
+ if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
+ ctm = (struct drm_color_ctm_3x4 *) blob->data;
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+			drm_warn(dev, "blob->length (%zu) doesn't match sizeof(struct drm_color_ctm_3x4) (%zu)\n",
+				 blob->length, sizeof(struct drm_color_ctm_3x4));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ int i = 0;
+
+ /* Multiplier */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
+ drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier);
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ const struct drm_color_lut32 *shaper_lut;
+ struct drm_device *dev = colorop->dev;
+ bool enabled = false;
+ u32 shaper_size;
+ int i = 0, ret = 0;
+
+ /* 1D Curve - SHAPER TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
+ drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+		ret = __set_output_tf(tf, NULL, 0, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+
+ /* 1D LUT - SHAPER LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
+ drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (shaper_size == colorop->size) {
+ ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+ }
+
+ if (!enabled)
+ tf->type = TF_TYPE_BYPASS;
+
+ return 0;
+}
+
+/* __set_colorop_3dlut - set DRM 3D LUT to DC stream
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut: DC 3D LUT
+ *
+ * Map user 3D LUT data to the DC 3D LUT and set all the bits necessary
+ * to program it on DCN accordingly.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if drm_lut3d_size is zero.
+ */
+static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ return -EINVAL;
+ }
+
+ /* Only supports 17x17x17 3D LUT (12-bit) now */
+ lut->lut_3d.use_12bits = true;
+ lut->lut_3d.use_tetrahedral_9 = false;
+
+ lut->state.bits.initialized = 1;
+ __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9, 12);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_color_lut32 *lut3d;
+ uint32_t lut3d_size;
+ int i = 0, ret = 0;
+
+ /* 3D LUT */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
+ if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ drm_dbg(dev, "3D LUT is not supported by hardware\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
+ lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+ ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
+ if (ret) {
+			drm_dbg(dev, "3D LUT colorop with ID: %d has invalid LUT size %d\n",
+ colorop->base.id, lut3d_size);
+ return ret;
+ }
+
+ /* 3D LUT requires shaper. If shaper colorop is bypassed, enable shaper curve
+ * with TRANSFER_FUNCTION_LINEAR
+ */
+ if (tf->type == TF_TYPE_BYPASS) {
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf_32(tf, NULL, 0, false);
+ }
+ }
+
+ return ret;
+}
+
+static int
+__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
+ const struct drm_color_lut32 *blend_lut = NULL;
+ struct drm_device *dev = colorop->dev;
+ uint32_t blend_size = 0;
+ int i = 0;
+
+ /* 1D Curve - BLND TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ /* 1D Curve - BLND LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (blend_size == colorop->size)
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ return 0;
+}
+
+static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
@@ -1227,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
return 0;
}
+static int
+amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct drm_colorop *colorop = plane_state->color_pipeline;
+ struct drm_device *dev = plane_state->plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+
+ /* 1D Curve - DEGAM TF */
+ if (!colorop)
+ return -EINVAL;
+
+ ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Multiplier */
+ colorop = colorop->next;
+ if (!colorop) {
+		drm_dbg(dev, "no Multiplier colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* 3x4 matrix */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3x4 matrix colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D Curve & LUT - SHAPER TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Shaper LUT colorop is already handled, just skip here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ /* 3D LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3D LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+ }
+
+ /* 1D Curve & LUT - BLND TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* BLND LUT colorop is already handled, just skip here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
* @crtc: amdgpu_dm crtc state
@@ -1323,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
}
+ if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
+ return 0;
+
return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
new file mode 100644
index 000000000000..d585618b8064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_property.h>
+#include <drm/drm_colorop.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm_colorop.h"
+#include "dc.h"
+
+const u64 amdgpu_dm_supported_degam_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+const u64 amdgpu_dm_supported_shaper_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
+
+const u64 amdgpu_dm_supported_blnd_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
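+
+/*
+ * Degamma and blend take decode curves (EOTFs / inverse OETFs) that
+ * bring content to linear light; the shaper takes the matching encode
+ * curves that return linear light to a non-linear encoding ahead of the
+ * 3D LUT.
+ */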
+
+#define MAX_COLOR_PIPELINE_OPS 10
+
+#define LUT3D_SIZE 17
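+/* 17 points per channel: 17 * 17 * 17 = 4913 drm_color_lut32 entries */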
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
+{
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+ int i = 0;
+
+ memset(ops, 0, sizeof(ops));
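+	/*
+	 * ops[] only tracks the allocations while the pipeline is being
+	 * chained; on any failure the partially built pipeline is torn
+	 * down in the cleanup path via drm_colorop_pipeline_destroy().
+	 */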
+
+ /* 1D curve - DEGAM TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_degam_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ list->type = ops[i]->base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
+
+ i++;
+
+ /* Multiplier */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+	drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 3x4 matrix */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+	drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D curve - SHAPER TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_shaper_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+		drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - SHAPER LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+		drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 3D LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+		drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+ }
+
+ /* 1D curve - BLND TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_blnd_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - BLND LUT */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+	drm_colorop_set_next_property(ops[i - 1], ops[i]);
+ return 0;
+
+cleanup:
+ if (ret == -ENOMEM)
+ drm_err(plane->dev, "KMS: Failed to allocate colorop\n");
+
+ drm_colorop_pipeline_destroy(dev);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
new file mode 100644
index 000000000000..2e1617ffc8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_COLOROP_H__
+#define __AMDGPU_DM_COLOROP_H__
+
+extern const u64 amdgpu_dm_supported_degam_tfs;
+extern const u64 amdgpu_dm_supported_shaper_tfs;
+extern const u64 amdgpu_dm_supported_blnd_tfs;
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list);
+
+#endif /* __AMDGPU_DM_COLOROP_H__*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 1ec9d03ad747..697e232acebf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);
@@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
@@ -721,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
- bool is_dcn;
+ bool has_degamma;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -760,20 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- /* Don't enable DRM CRTC degamma property for DCE since it doesn't
- * support programmable degamma anywhere.
- */
- is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- /* Dont't enable DRM CRTC degamma property for DCN401 since the
- * pre-blending degamma LUT doesn't apply to cursor, and therefore
- * can't work similar to a post-blending degamma LUT as in other hw
- * versions.
- * TODO: revisit it once KMS plane color API is merged.
+	/* Don't enable the DRM CRTC degamma property when:
+	 * 1. the plane color pipeline replaces degamma,
+	 * 2. on DCE, which doesn't support programmable degamma anywhere, or
+	 * 3. on DCN401, where the pre-blending degamma LUT doesn't apply to
+	 *    the cursor.
+	 */
- drm_crtc_enable_color_mgmt(&acrtc->base,
- (is_dcn &&
- dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
- MAX_COLOR_LUT_ENTRIES : 0,
+ if (plane->color_pipeline_property)
+ has_degamma = false;
+ else
+ has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
+
+ drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f263e1a4537e..a9839485f2a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -759,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
+ bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
bool valid_test_pattern = false;
uint8_t param_nums = 0;
/* init with default 80bit custom pattern */
@@ -850,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* because it might have been disabled after a test pattern was set.
* AUX depends on HPD * sequence dependent, do not move!
*/
- if (!disable_hpd)
+ if (supports_hpd && !disable_hpd)
dc_link_enable_hpd(link);
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
@@ -888,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* Need disable interrupt to avoid SW driver disable DP output. This is
* done after the test pattern is set.
*/
- if (valid_test_pattern && disable_hpd)
+ if (valid_test_pattern && supports_hpd && disable_hpd)
dc_link_disable_hpd(link);
kfree(wr_buf);
@@ -1302,7 +1303,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
seq_printf(m, "%d\n", segments);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 19038f336155..85ce558cefc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -201,6 +201,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_link_adjustment link_adjust;
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
+ const struct dc *dc = aconnector->dc_link->dc;
guard(mutex)(&hdcp_w->mutex);
drm_connector_get(&aconnector->base);
@@ -231,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
link_adjust.hdcp1.disable = 1;
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
}
+ link_adjust.hdcp2.use_fw_locality_check =
+ (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -534,6 +538,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
+ const struct dc *dc = aconnector->dc_link->dc;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -575,6 +580,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->adjust.auth_delay = 2;
link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
+ link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
@@ -786,15 +793,8 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
ddc_funcs->read_i2c = lp_read_i2c;
ddc_funcs->write_dpcd = lp_write_dpcd;
ddc_funcs->read_dpcd = lp_read_dpcd;
-
- config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
- if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
- ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
- ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
- } else {
- ddc_funcs->atomic_write_poll_read_i2c = NULL;
- ddc_funcs->atomic_write_poll_read_aux = NULL;
- }
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fe100e4c9801..ac98c746c3de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
@@ -130,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
+ edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
drm_edid_get_monitor_name(edid_buf,
edid_caps->display_name,
@@ -996,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
- int retry = 3;
- enum dc_edid_status edid_status;
+ int retry = 25;
+ enum dc_edid_status edid_status = EDID_NO_RESPONSE;
const struct drm_edid *drm_edid;
const struct edid *edid;
@@ -1027,7 +1029,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
}
if (!drm_edid)
- return EDID_NO_RESPONSE;
+ continue;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
if (!edid ||
@@ -1045,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
&sink->dc_edid,
&sink->edid_caps);
- } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+ } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
if (edid_status != EDID_OK)
DRM_ERROR("EDID err: %d, on connector: %s",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a1c722112c22..0a2a3f233a0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -476,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h;
struct list_head *hnd_list_l;
@@ -512,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
}
void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
@@ -537,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h, *hnd_list_l;
unsigned long irq_table_flags;
@@ -557,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
}
/*
@@ -893,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int irq_type;
int i;
+ bool use_polling = false;
/* First, clear all hpd and hpdrx interrupts */
for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
@@ -906,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
+ use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -947,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (use_polling)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -997,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_fini(dev);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5e92eaa67aa3..dbd1da4d85d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+ uint64_t effective_kbps = (uint64_t)kbps;
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+ effective_kbps *= 1006;
+ effective_kbps = div_u64(effective_kbps, 1000);
+ }
- return fec_overhead_multiplier_x1000;
+ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
- u64 peak_kbps = kbps;
+ uint64_t pbn_effective = (uint64_t)pbn;
+
+ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+ pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+ else
+ pbn_effective *= 1000;
- peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
@@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;
for (i = 0; i < count; i++) {
@@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1763,18 +1762,6 @@ clean_exit:
return ret;
}
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
- uint64_t kbps = (uint64_t)pbn;
-
- kbps *= (1000000 / PEAK_FACTOR_X1000);
- kbps *= 8;
- kbps *= 54;
- kbps /= 64;
-
- return (uint32_t)kbps;
-}
-
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
@@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) {
- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
} else {
/* For topology LCT 1 case - only one mstb*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index e027798ece03..2e3ee78999d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+#else
+
+#define MAX_COLOR_PIPELINES 5
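+/*
+ * Only a single default pipeline is created today; the array size
+ * leaves room for alternate pipelines to be advertised later.
+ */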
+
+static int
+dm_plane_init_colorops(struct drm_plane *plane)
+{
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int len = 0;
+ int ret;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+
+ /* initialize pipeline */
+ if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
+ ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
+ if (ret) {
+ drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
+ plane->base.id, ret);
+ return ret;
+ }
+ len++;
+
+ /* Create COLOR_PIPELINE property and attach */
+ drm_plane_create_color_pipeline_property(plane, pipelines, len);
+ }
+
+ return 0;
+}
#endif
static const struct drm_plane_funcs dm_plane_funcs = {
@@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#else
+ res = dm_plane_init_colorops(plane);
+ if (res)
+ return res;
#endif
+
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 80704d709e44..da94e3544b65 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -162,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
if (link) {
link->dc->link_srv->edp_setup_replay(link, stream);
- link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0);
DRM_DEBUG_DRIVER("Enabling replay...\n");
link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index dc943abd6dba..7277ed21552f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -36,7 +36,7 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
-DC_LIBS += dml2
+DC_LIBS += dml2_0
DC_LIBS += soc_and_ip_translator
endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 154fd2c18e88..d1471f34e419 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
ATOM_OBJECT *object);
static struct device_id device_type_from_device_id(uint16_t device_id);
static uint32_t signal_to_ss_id(enum as_signal_type signal);
-static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id);
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
ATOM_OBJECT *object);
@@ -441,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4(
le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
info->pll_info.max_output_pxl_clk_pll_frequency =
le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10;
if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
/* Since there is no information on the SS, report conservative
@@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1(
info->external_clock_source_frequency_for_dp =
le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+ info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10;
/* There should be only one entry in the SS info table for Memory Clock
*/
@@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control(
return bp->cmd_tbl.transmitter_control(bp, cntl);
}
+static enum bp_result bios_parser_select_crtc_source(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
static enum bp_result bios_parser_encoder_control(
struct dc_bios *dcb,
struct bp_encoder_control *cntl)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
+ if (cntl->engine_id == ENGINE_ID_DACA) {
+ if (!bp->cmd_tbl.dac1_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac1_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ } else if (cntl->engine_id == ENGINE_ID_DACB) {
+ if (!bp->cmd_tbl.dac2_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac2_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ }
+
if (!bp->cmd_tbl.dig_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}
+static enum bp_result bios_parser_dac_load_detection(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct dc_context *ctx = dcb->ctx;
+ struct bp_load_detection_parameters bp_params = {0};
+ enum bp_result bp_result;
+ uint32_t bios_0_scratch;
+ uint32_t device_id_mask = 0;
+
+ bp_params.engine_id = engine_id;
+ bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id);
+
+ if (engine_id != ENGINE_ID_DACA &&
+ engine_id != ENGINE_ID_DACB)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (!bp->cmd_tbl.dac_load_detection)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
+ device_id_mask = ATOM_S0_CRT1_MASK;
+ else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT)
+ device_id_mask = ATOM_S0_CRT2_MASK;
+ else
+ return BP_RESULT_UNSUPPORTED;
+
+ /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+ bios_0_scratch &= ~device_id_mask;
+ dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch);
+
+ bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
+
+ if (bp_result != BP_RESULT_OK)
+ return bp_result;
+
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+
+ if (bios_0_scratch & device_id_mask)
+ return BP_RESULT_OK;
+
+ return BP_RESULT_FAILURE;
+}
+
static enum bp_result bios_parser_adjust_pixel_clock(
struct dc_bios *dcb,
struct bp_adjust_pixel_clock_parameters *bp_params)
@@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported(
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- uint32_t mask = get_support_mask_for_device_id(id);
+ uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id);
return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
}
@@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal)
return clk_id_ss;
}
-static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id)
{
- enum dal_device_type device_type = device_id.device_type;
- uint32_t enum_id = device_id.enum_id;
-
switch (device_type) {
case DEVICE_TYPE_LCD:
switch (enum_id) {
@@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_device_id_supported = bios_parser_is_device_id_supported,
/* COMMANDS */
+ .select_crtc_source = bios_parser_select_crtc_source,
+
.encoder_control = bios_parser_encoder_control,
+ .dac_load_detection = bios_parser_dac_load_detection,
+
.transmitter_control = bios_parser_transmitter_control,
.enable_crtc = bios_parser_enable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 04eb647acc4e..550a9f1d03f8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1(
/* not provided by VBIOS */
info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
- info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_HSYNC_POLARITY);
- info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_VSYNC_POLARITY);
+ info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_VSYNC_POLARITY);
/* not provided by VBIOS */
info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 58e88778da7f..22457f417e65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp);
static void init_set_pixel_clock(struct bios_parser *bp);
static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_load_detection(struct bios_parser *bp);
static void init_dac_output_control(struct bios_parser *bp);
static void init_set_crtc_timing(struct bios_parser *bp);
static void init_enable_crtc(struct bios_parser *bp);
@@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_pixel_clock(bp);
init_enable_spread_spectrum_on_ppll(bp);
init_adjust_display_pll(bp);
+ init_select_crtc_source(bp);
init_dac_encoder_control(bp);
+ init_dac_load_detection(bp);
init_dac_output_control(bp);
init_set_crtc_timing(bp);
init_enable_crtc(bp);
@@ -1612,6 +1616,198 @@ static enum bp_result adjust_display_pll_v3(
/*******************************************************************************
********************************************************************************
**
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
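+ /* Dispatch on the VBIOS table parameter revision; an unknown revision
+ * leaves the hook NULL so bios_parser_select_crtc_source fails cleanly.
+ */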
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 1:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PS_ALLOCATION params = {0};
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ switch (bp_params->engine_id) {
+ case ENGINE_ID_DACA:
+ params.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENGINE_ID_DACB:
+ params.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static bool select_crtc_source_v2_encoder_id(
+ enum engine_id engine_id, uint8_t *out_encoder_id)
+{
+ uint8_t encoder_id = 0;
+
+ switch (engine_id) {
+ case ENGINE_ID_DIGA:
+ encoder_id = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGB:
+ encoder_id = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGC:
+ encoder_id = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGD:
+ encoder_id = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGE:
+ encoder_id = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGF:
+ encoder_id = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGG:
+ encoder_id = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACA:
+ encoder_id = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACB:
+ encoder_id = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_id = encoder_id;
+ return true;
+}
+
+static bool select_crtc_source_v2_encoder_mode(
+ enum signal_type signal_type, uint8_t *out_encoder_mode)
+{
+ uint8_t encoder_mode = 0;
+
+ switch (signal_type) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ encoder_mode = ATOM_ENCODER_MODE_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ encoder_mode = ATOM_ENCODER_MODE_HDMI;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ encoder_mode = ATOM_ENCODER_MODE_LVDS;
+ break;
+ case SIGNAL_TYPE_RGB:
+ encoder_mode = ATOM_ENCODER_MODE_CRT;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ encoder_mode = ATOM_ENCODER_MODE_DP_MST;
+ break;
+ case SIGNAL_TYPE_EDP:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_mode = encoder_mode;
+ return true;
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ /* v2 entry point uses the v2 parameter layout; zero-init covers padding */
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 params = {0};
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params = {0};
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ params.ucDstBpc = bp_params->bit_depth;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
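+
+/* Illustrative call flow (a sketch, not part of this change): the DM routes
+ * a controller to an engine through the select_crtc_source hook registered
+ * in bios_parser.c, e.g.
+ *
+ * struct bp_crtc_source_select params = {
+ * .controller_id = CONTROLLER_ID_D0,
+ * .engine_id = ENGINE_ID_DACA,
+ * .sink_signal = SIGNAL_TYPE_RGB,
+ * };
+ * dcb->funcs->select_crtc_source(dcb, &params);
+ *
+ * Field names follow the structs used above; the controller enum value is
+ * an assumption for illustration.
+ */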
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC ENCODER CONTROL
**
********************************************************************************
@@ -1711,6 +1907,96 @@ static enum bp_result dac2_encoder_control_v1(
/*******************************************************************************
********************************************************************************
**
+ ** DAC LOAD DETECTION
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static void init_dac_load_detection(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) {
+ case 1:
+ case 2:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v1;
+ break;
+ case 3:
+ default:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v3;
+ break;
+ }
+}
+
+static void dac_load_detect_prepare_params(
+ struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params,
+ enum engine_id engine_id,
+ uint16_t device_id,
+ uint8_t misc)
+{
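+ /* ucDacType takes the ATOM_DAC_* encoding, not a dc engine_id value */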
+ uint8_t dac_type = ATOM_DAC_A;
+
+ if (engine_id == ENGINE_ID_DACB)
+ dac_type = ATOM_DAC_B;
+
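+ /* ATOM table fields are little-endian, hence the cpu_to_le16() */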
+ params->sDacload.usDeviceID = cpu_to_le16(device_id);
+ params->sDacload.ucDacType = dac_type;
+ params->sDacload.ucMisc = misc;
+}
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params = {0};
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ 0);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params = {0};
+
+ uint8_t misc = 0;
+
+ if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT ||
+ bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT)
+ misc = DAC_LOAD_MISC_YPrPb;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ misc);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC OUTPUT CONTROL
**
********************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index ad533775e724..e89b1ba0048b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -52,6 +52,9 @@ struct cmd_tbl {
enum bp_result (*adjust_display_pll)(
struct bios_parser *bp,
struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
bool enable,
@@ -68,6 +71,9 @@ struct cmd_tbl {
enum bp_result (*dac2_output_control)(
struct bios_parser *bp,
bool enable);
+ enum bp_result (*dac_load_detection)(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
enum bp_result (*set_crtc_timing)(
struct bios_parser *bp,
struct bp_hw_crtc_timing_parameters *bp_params);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 9e63fa72101c..db687a13174d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -509,16 +509,16 @@ void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_b
regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ if (regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ if (regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ if (regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ if (regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index b315ed91e010..3a881451e9da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -40,7 +40,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-
+#include "reg_helper.h"
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
@@ -48,9 +48,43 @@
#include "link_service.h"
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 8
+
+struct IP_BASE_INSTANCE {
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE {
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } };
+
+#define regCLK1_CLK0_CURRENT_CNT 0x0314
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x0315
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x0316
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x0317
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x0318
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0319
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
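+
+/* Compose an absolute register address: the IP base segment selected by the
+ * register's BASE_IDX plus the register offset itself.
+ */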
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define UNSUPPORTED_DCFCLK 10000000
#define MIN_DPP_DISP_CLK 100000
@@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+}
+
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ struct dcn35_clk_internal internal = {0};
+
+ dcn315_dump_clk_registers_internal(&internal, clk_mgr_base);
+
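+ /* CURRENT_CNT readbacks are divided by 10 here and scaled by 1000 to kHz
+ * by the callers, i.e. the counters appear to tick in 100 kHz units.
+ */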
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
return;
}
@@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn315_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn315_init_clocks,
.enable_pme_wa = dcn315_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn315_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+void dcn315_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int);
+ struct clk_log_info log_info = {0};
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+ dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
+}
+
void dcn315_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn315 *clk_mgr,
@@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
+ clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000;
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
index ac36ddf5dd1a..642ae3d4a790 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
@@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn315_init_clocks(struct clk_mgr *clk_mgr);
void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
#endif //__DCN315_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b11383fba35f..dfd0c9505af0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
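+ /* Default ref dtbclk to 600 MHz when enabled with no value; clear a stale
+ * ~600 MHz leftover when dtbclk is disabled.
+ */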
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
+ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+ new_clocks->ref_dtbclk_khz = 0;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
- if (actual_dtbclk) {
+ if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
@@ -633,16 +635,16 @@ static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs
regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ if (regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ if (regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ if (regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ if (regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
@@ -1293,6 +1295,35 @@ static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}
+static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
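+ /* DSCCLK has no clock table of its own; it is derived as DISPCLK / 3 */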
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn35_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
@@ -1304,6 +1335,7 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
+ .get_max_clock_khz = dcn35_get_max_clock_khz,
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5f2d5638c819..8be9cbd43e18 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -83,7 +83,7 @@
#include "hw_sequencer_private.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_internal_types.h"
#include "soc_and_ip_translator.h"
#endif
@@ -148,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build";
/* Private functions */
-static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+static inline void elevate_update_type(
+ struct surface_update_descriptor *descriptor,
+ enum surface_update_type new_type,
+ enum dc_lock_descriptor new_locks
+)
{
- if (new > *original)
- *original = new;
+ if (new_type > descriptor->update_type)
+ descriptor->update_type = new_type;
+
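+ /* Lock requirements accumulate across elevations; they never ratchet down */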
+ descriptor->lock_descriptor |= new_locks;
}
static void destroy_links(struct dc *dc)
@@ -297,6 +303,7 @@ static bool create_links(
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {
@@ -493,9 +500,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
1,
*adjust);
stream->adjust.timing_adjust_pending = false;
+
+ if (dc->hwss.notify_cursor_offload_drr_update)
+ dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
+
return true;
}
}
+
return false;
}
@@ -1143,8 +1155,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
- if (dc->caps.max_optimizable_video_width == 0)
- dc->caps.max_optimizable_video_width = 5120;
+ if (dc->check_config.max_optimizable_video_width == 0)
+ dc->check_config.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -2135,6 +2147,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb)) {
disable_vbios_mode_if_required(dc, context);
dc->hwss.enable_accelerated_mode(dc, context);
+ } else if (get_seamless_boot_stream_count(dc->current_state) > 0) {
+ /* If the previous stream still has its apply-seamless-boot flag set,
+ * the OS has not actually performed a flip yet. If we receive
+ * dc_commit_streams again at this point, check once more whether the
+ * actual HW timing matches what the OS provided.
+ */
+ disable_vbios_mode_if_required(dc, context);
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
@@ -2158,8 +2178,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
@@ -2188,8 +2208,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true);
+
if (result != DC_OK) {
/* Application of dc_state to hardware stopped. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
@@ -2229,8 +2255,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.commit_subvp_config(dc, context);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2645,47 +2671,50 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
+static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `plane_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
@@ -2697,7 +2726,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* recalculate stutter period.
*/
update_flags->bits.dcc_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
@@ -2706,30 +2735,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
+ const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;
- if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(struct dc_tiling_info)) != 0) {
+ if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
-
- /* todo: below are HW dependent, we should add a hook to
- * DCE/N resource and validated there.
- */
- if (!dc->debug.skip_full_updated_if_possible) {
- /* swizzled mode requires RQ to be setup properly,
- * thus need to run DML to calculate RQ settings
- */
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+
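+ /* A non-linear swizzle requires RQ to be set up properly, so DML must
+ * recalculate bandwidth; linear surfaces can skip the full update.
+ */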
+ switch (tiling->gfxversion) {
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
+ if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxAddr3:
+ if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ case DcGfxVersionUnknown:
+ default:
+ break;
}
}
@@ -2737,14 +2777,18 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
return update_type;
}
-static enum surface_update_type get_scaling_info_update_type(
- const struct dc *dc,
+static struct surface_update_descriptor get_scaling_info_update_type(
+ const struct dc_check_config *check_config,
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `scaling_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->scaling_info->src_rect.width != u->surface->src_rect.width
|| u->scaling_info->src_rect.height != u->surface->src_rect.height
@@ -2755,6 +2799,7 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->scaling_quality.integer_scaling !=
u->surface->scaling_quality.integer_scaling) {
update_flags->bits.scaling_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
@@ -2768,7 +2813,7 @@ static enum surface_update_type get_scaling_info_update_type(
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
/* Changing clip size of a large surface may result in MPC slice count change */
@@ -2780,123 +2825,109 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
- || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) {
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
update_flags->bits.position_change = 1;
+ }
- /* process every update flag before returning */
- if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change
- || update_flags->bits.scaling_change)
- return UPDATE_TYPE_FULL;
-
- if (update_flags->bits.position_change)
- return UPDATE_TYPE_MED;
-
- return UPDATE_TYPE_FAST;
+ return update_type;
}
-static enum surface_update_type det_surface_update(const struct dc *dc,
- const struct dc_surface_update *u)
+static struct surface_update_descriptor det_surface_update(
+ const struct dc_check_config *check_config,
+ struct dc_surface_update *u)
{
- const struct dc_state *context = dc->current_state;
- enum surface_update_type type;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ if (u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
- return UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ return overall_type;
}
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ struct surface_update_descriptor inner_type = get_plane_info_update_type(u);
+
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
- type = get_scaling_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ inner_type = get_scaling_info_update_type(check_config, u);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
- if (u->in_transfer_func)
+ if (u->in_transfer_func) {
update_flags->bits.in_transfer_func_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->input_csc_color_matrix)
+ if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->coeff_reduction_factor)
+ if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->gamut_remap_matrix)
+ if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->blend_tf)
+ if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
-
- if (u->gamma) {
- enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
-
- if (u->plane_info)
- format = u->plane_info->format;
- else
- format = u->surface->format;
-
- if (dce_use_lut(format))
- update_flags->bits.gamma_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->lut3d_func || u->func_shaper)
+ if (u->lut3d_func || u->func_shaper) {
update_flags->bits.lut_3d = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ // TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
+ // TODO: Should be fast?
update_flags->bits.sdr_white_level_nits = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->cm2_params) {
- if ((u->cm2_params->component_settings.shaper_3dlut_setting
- != u->surface->mcm_shaper_3dlut_setting)
- || (u->cm2_params->component_settings.lut1d_enable
- != u->surface->mcm_lut1d_enable))
- update_flags->bits.mcm_transfer_function_enable_change = 1;
- if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
- != u->surface->mcm_luts.lut3d_data.lut3d_src)
+ if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting
+ || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable
+ || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) {
update_flags->bits.mcm_transfer_function_enable_change = 1;
- }
- if (update_flags->bits.in_transfer_func_change) {
- type = UPDATE_TYPE_MED;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
}
if (update_flags->bits.lut_3d &&
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
- }
- if (update_flags->bits.mcm_transfer_function_enable_change) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
- if (dc->debug.enable_legacy_fast_update &&
+ if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
update_flags->bits.coeff_reduction_change)) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
return overall_type;
}
@@ -2924,32 +2955,26 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda
}
}
-static enum surface_update_type check_update_surfaces_for_stream(
- struct dc *dc,
+static struct surface_update_descriptor check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
-
- if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
- overall_type = UPDATE_TYPE_FULL;
-
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- overall_type = UPDATE_TYPE_FULL;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (stream_update && stream_update->pending_test_pattern) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (stream_update && stream_update->hw_cursor_req) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
/* some stream updates require passive update */
if (stream_update) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
@@ -2957,14 +2982,16 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
- if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
su_flags->bits.abm_level = 1;
- if (stream_update->dpms_off)
+ if (stream_update->dpms_off) {
su_flags->bits.dpms_off = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK);
+ }
if (stream_update->gamut_remap)
su_flags->bits.gamut_remap = 1;
@@ -2992,24 +3019,27 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->output_color_space)
su_flags->bits.out_csc = 1;
- if (su_flags->raw != 0)
- overall_type = UPDATE_TYPE_FULL;
+ // TODO: Make each elevation explicit, so as not to override a fast stream update in crtc_timing_adjust
+ if (su_flags->raw)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- if (stream_update->output_csc_transform)
+ // Non-global cases
+ if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- /* Output transfer function changes do not require bandwidth recalculation,
- * so don't trigger a full update
- */
- if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) {
su_flags->bits.out_tf = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
}
- for (i = 0 ; i < surface_count; i++) {
- enum surface_update_type type =
- det_surface_update(dc, &updates[i]);
+ for (int i = 0 ; i < surface_count; i++) {
+ struct surface_update_descriptor inner_type =
+ det_surface_update(check_config, &updates[i]);
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
return overall_type;
@@ -3020,44 +3050,18 @@ static enum surface_update_type check_update_surfaces_for_stream(
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type type;
-
if (stream_update)
stream_update->stream->update_flags.raw = 0;
- for (i = 0; i < surface_count; i++)
+ for (int i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
- type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL) {
- if (stream_update) {
- uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
- stream_update->stream->update_flags.raw = 0xFFFFFFFF;
- stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
- }
- for (i = 0; i < surface_count; i++)
- updates[i].surface->update_flags.raw = 0xFFFFFFFF;
- }
-
- if (type == UPDATE_TYPE_FAST) {
- // If there's an available clock comparator, we use that.
- if (dc->clk_mgr->funcs->are_clock_states_equal) {
- if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
- dc->optimized_required = true;
- // Else we fallback to mem compare.
- } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
- dc->optimized_required = true;
- }
- }
-
- return type;
+ return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
static struct dc_stream_status *stream_get_status(
@@ -3388,7 +3392,11 @@ static void restore_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
+
+ // The kref is persistent state; preserve it across the struct copy
+ struct kref temp_refcount = stream->refcount;
*stream = scratch->stream_state;
+ stream->refcount = temp_refcount;
}
/**
@@ -3426,6 +3434,13 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream);
+
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
@@ -3472,7 +3487,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
+ &dc->check_config, srf_updates, surface_count, stream_update).update_type;
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ update_type = UPDATE_TYPE_FULL;
+
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
@@ -3504,6 +3522,16 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -4149,7 +4177,7 @@ static void commit_planes_for_stream(struct dc *dc,
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4176,16 +4204,16 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
dc->hwss.interdependent_update_lock(dc, context, true);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
@@ -4228,9 +4256,8 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
-
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
return;
}
@@ -4419,7 +4446,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4467,13 +4494,13 @@ static void commit_planes_for_stream(struct dc *dc,
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4489,6 +4516,8 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->plane_state->skip_manual_trigger)
continue;
+ if (dc->hwss.program_cursor_offload_now)
+ dc->hwss.program_cursor_offload_now(dc, pipe_ctx);
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
@@ -4994,7 +5023,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
}
}
-static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
{
int i;
@@ -5035,18 +5064,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
return false;
}
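+/* The "weak" variant collects the full-update conditions that depend on dc
+ * state rather than on the update payload, so it can be evaluated alongside
+ * dc_check_update_surfaces_for_stream(), which no longer takes dc.
+ */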
-static bool full_update_required(struct dc *dc,
- struct dc_surface_update *srf_updates,
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
-
- int i;
- struct dc_stream_status *stream_status;
const struct dc_state *context = dc->current_state;
+
+ if (srf_updates) {
+ for (int i = 0; i < surface_count; i++) {
+ if (!is_surface_in_context(context, srf_updates[i].surface))
+ return true;
+ }
+ }
- for (i = 0; i < surface_count; i++) {
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
+ return false;
+}
+
+static bool full_update_required(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ return true;
+
+ for (int i = 0; i < surface_count; i++) {
if (srf_updates &&
(srf_updates[i].plane_info ||
srf_updates[i].scaling_info ||
@@ -5062,8 +5117,7 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
(srf_updates[i].cm2_params &&
(srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
- !is_surface_in_context(context, srf_updates[i].surface)))
+ srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
return true;
}
@@ -5099,26 +5153,16 @@ static bool full_update_required(struct dc *dc,
stream_update->hw_cursor_req))
return true;
- if (stream) {
- stream_status = dc_stream_get_status(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
- return true;
-
- if (dc_can_clear_cursor_limit(dc))
- return true;
-
return false;
}
-static bool fast_update_only(struct dc *dc,
- struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
+static bool fast_update_only(
+ const struct dc *dc,
+ const struct dc_fast_update *fast_update,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
@@ -5181,7 +5225,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5224,7 +5268,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update)
+ !dc->check_config.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5350,7 +5394,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
- if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21)
ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
else
@@ -5935,6 +5980,101 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
return true;
}
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline)
+{
+ bool status = false;
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ uint8_t otg_inst = 0;
+ unsigned int panel_inst = 0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // get otg_inst
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+			//TODO: refactor for multi-eDP support
+ break;
+ }
+ }
+
+ if (pipe_ctx)
+ otg_inst = pipe_ctx->stream_res.tg->inst;
+
+	// before enabling smart power OLED, call set_pipe so DMUB can apply the ABM config
+	if (enable && dc->hwss.set_pipe && pipe_ctx)
+		dc->hwss.set_pipe(pipe_ctx);
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE;
+ cmd.smart_power_oled_enable.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header);
+ cmd.smart_power_oled_enable.header.ret_status = 1;
+ cmd.smart_power_oled_enable.data.enable = enable;
+ cmd.smart_power_oled_enable.data.panel_inst = panel_inst;
+ cmd.smart_power_oled_enable.data.peak_nits = peak_nits;
+ cmd.smart_power_oled_enable.data.otg_inst = otg_inst;
+ cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine;
+ cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter;
+
+ cmd.smart_power_oled_enable.data.debugcontrol = debug_control;
+ cmd.smart_power_oled_enable.data.triggerline = triggerline;
+ cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL;
+
+ // send cmd
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return status;
+}
+
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL)
+{
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ bool status = false;
+ unsigned int panel_inst = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL;
+ cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data);
+ cmd.smart_power_oled_getmaxcll.header.ret_status = 1;
+
+ cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst;
+
+ // send cmd and wait for reply
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+
+ if (status)
+ *pCurrent_MaxCLL = cmd.smart_power_oled_getmaxcll.data.output.current_max_cll;
+ else
+ *pCurrent_MaxCLL = 0;
+
+ return status;
+}
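
As a usage sketch (not part of this patch), a DM-layer caller could pair the two
entry points above roughly as follows; the 600-nit peak and the zeroed
debug/trigger arguments are illustrative placeholders, not values taken from
this change:

	/* hypothetical caller; argument values are placeholders */
	static void dm_smart_power_oled_demo(struct dc_link *edp_link)
	{
		unsigned int max_cll = 0;

		/* enable with a 600-nit peak, no debug control, no fixed CLL */
		if (!dc_smart_power_oled_enable(edp_link, true, 600, 0, 0, 0))
			return;

		/* read back the MaxCLL that DMUB is currently enforcing */
		if (dc_smart_power_oled_get_max_cll(edp_link, &max_cll))
			DC_LOG_DEBUG("smart power OLED MaxCLL: %u\n", max_cll);
	}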
+
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index)
{
@@ -6349,7 +6489,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc)
return false;
}
-bool dc_can_clear_cursor_limit(struct dc *dc)
+bool dc_can_clear_cursor_limit(const struct dc *dc)
{
uint32_t i;
@@ -6378,3 +6518,576 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
if (dc->hwss.get_underflow_debug_data)
dc->hwss.get_underflow_debug_data(dc, tg, out_data);
}
+
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
+ struct power_features *out_data)
+{
+ out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support;
+ out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
+}
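
Note that primary_otg_inst is accepted but not consulted by this
implementation; both outputs come straight from the current state. A minimal
hypothetical consumer (format specifiers assume integer-like fields):

	struct power_features pf = {0};

	dc_get_power_feature_status(dc, otg_inst, &pf); /* otg_inst currently unused */
	DC_LOG_DEBUG("uclk p-state: %d fams: %d\n", pf.uclk_p_state, pf.fams);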
+
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state)
+{
+ struct dc_state *context;
+ struct resource_context *res_ctx;
+ int i;
+
+ if (!dc || !dc->current_state || !state) {
+ if (state)
+ state->state_valid = false;
+ return false;
+ }
+
+ /* Initialize the state structure */
+ memset(state, 0, sizeof(struct dc_register_software_state));
+
+ context = dc->current_state;
+ res_ctx = &context->res_ctx;
+
+ /* Count active pipes and streams */
+ state->active_pipe_count = 0;
+ state->active_stream_count = context->stream_count;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream)
+ state->active_pipe_count++;
+ }
+
+ /* Capture HUBP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ state->hubp[i].valid_stream = false;
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->hubp[i].valid_stream = true;
+
+ /* HUBP register programming variables */
+ if (pipe_ctx->stream_res.tg)
+ state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst;
+
+ state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0;
+
+ state->hubp[i].valid_plane_state = false;
+ if (pipe_ctx->plane_state) {
+ state->hubp[i].valid_plane_state = true;
+ state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format;
+ state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation;
+ state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0;
+
+ /* Surface size */
+ if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) {
+ state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width;
+ state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height;
+ }
+
+ /* Viewport dimensions from scaler data */
+ if (pipe_ctx->plane_state->src_rect.width > 0) {
+ state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width;
+ state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height;
+ state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x;
+ state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /* DCC settings */
+ state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0;
+ state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks;
+ state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk;
+
+ /* Surface pitch */
+ state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch;
+ state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch;
+ state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch;
+ state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c;
+
+ /* Surface addresses - primary */
+ state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part;
+ state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part;
+ state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part;
+ state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part;
+
+ /* TMZ settings */
+ state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+ state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+
+ /* Tiling configuration */
+ state->hubp[i].min_dc_gfx_version9 = false;
+ if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) {
+ state->hubp[i].min_dc_gfx_version9 = true;
+ state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle;
+ state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes;
+ state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks;
+ state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave;
+ state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines;
+ state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se;
+ state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs;
+ }
+ }
+
+ /* DML Request Size Configuration */
+ if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size;
+ state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size;
+ state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height;
+ state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear;
+ }
+
+ /* Chroma request size configuration */
+ if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size;
+ state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size;
+ state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height;
+ state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear;
+ }
+
+ /* DML expansion modes */
+ state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode;
+ state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode;
+ state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode;
+ state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode;
+
+ /* DML DLG parameters - nominal */
+ state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank;
+ state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank;
+ state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip;
+ state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip;
+
+ /* DML prefetch settings */
+ state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch;
+ state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch;
+ state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c;
+
+ /* TTU parameters */
+ state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm;
+ state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm;
+ state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip;
+ state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank;
+ }
+
+ /* Capture HUBBUB programming state */
+ if (dc->res_pool->hubbub) {
+ /* Individual DET buffer sizes - software state variables that program DET registers */
+ for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) {
+			uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb;
+
+			switch (i) {
+ case 0:
+ state->hubbub.det0_size = det_size;
+ break;
+ case 1:
+ state->hubbub.det1_size = det_size;
+ break;
+ case 2:
+ state->hubbub.det2_size = det_size;
+ break;
+ case 3:
+ state->hubbub.det3_size = det_size;
+ break;
+ }
+ }
+
+ /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */
+ // TODO: Handle logic for legacy DCN pre-DCN401
+ state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ }
+
+ /* Capture DPP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0;
+
+ if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) {
+ /* Access dscl_prog_data directly - this contains the actual software state used for register programming */
+ struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+
+ /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */
+ state->dpp[i].recout_start_x = dscl_data->recout.x;
+ state->dpp[i].recout_start_y = dscl_data->recout.y;
+ state->dpp[i].recout_width = dscl_data->recout.width;
+ state->dpp[i].recout_height = dscl_data->recout.height;
+
+ /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */
+ state->dpp[i].mpc_width = dscl_data->mpc_size.width;
+ state->dpp[i].mpc_height = dscl_data->mpc_size.height;
+
+ /* DSCL mode - software state that programs SCL_MODE registers */
+ state->dpp[i].dscl_mode = dscl_data->dscl_mode;
+
+ /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */
+ state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio
+ state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio
+
+ /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */
+ state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ }
+ }
+
+ /* Capture essential clock state for underflow analysis */
+ if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) {
+ /* Core display clocks affecting bandwidth and timing */
+ state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+
+ /* Per-pipe clock configuration - only capture what's essential */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+			if (pipe_ctx->stream) {
+ /* Essential clocks that directly affect underflow risk */
+ state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz;
+ state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ state->dccg.dppclk_enable[i] = 1;
+
+ /* DP stream clock only for DP signals */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ state->dccg.dpstreamclk_enable[i] = 1;
+ } else {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ } else {
+ /* Inactive pipe - no clocks */
+ state->dccg.dppclk_khz[i] = 0;
+ state->dccg.pixclk_khz[i] = 0;
+ state->dccg.dppclk_enable[i] = 0;
+ if (i < 4) {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ }
+ }
+
+ /* DSC clock state - only when actually using DSC */
+ for (i = 0; i < MAX_PIPES; i++) {
+			struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? &res_ctx->pipe_ctx[i] : NULL;
+
+			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */
+ } else {
+ state->dccg.dscclk_khz[i] = 0;
+ }
+ }
+
+ /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */
+ for (i = 0; i < 2; i++) {
+ state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */
+ }
+	}
+
+ /* Capture essential DSC configuration for underflow analysis */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+		if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+			/* DSC is enabled - capture essential configuration */
+			struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg;
+
+			state->dsc[i].dsc_clock_enable = 1;
+
+			/* DSC configuration affecting bandwidth and timing */
+			state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h;
+			state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v;
+			state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel;
+
+ /* OPP pipe source for DSC forwarding */
+ if (pipe_ctx->stream_res.opp) {
+ state->dsc[i].dscrm_dsc_forward_enable = 1;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->dsc[i].dscrm_dsc_forward_enable = 0;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = 0;
+ }
+ } else {
+ /* DSC not enabled - clear all fields */
+ memset(&state->dsc[i], 0, sizeof(state->dsc[i]));
+ }
+ }
+
+ /* Capture MPC programming state - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream) {
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ /* MPCC blending tree and mode control - capture actual blend configuration */
+ state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
+ state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value;
+ state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 255 : 0;
+ state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */
+ state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */
+
+ /* MPCC blending tree connections - capture tree topology */
+ if (pipe_ctx->bottom_pipe) {
+ state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx;
+ } else {
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ }
+ state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */
+
+ /* MPCC output gamma control - capture gamma programming */
+ if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) {
+ state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */
+ state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */
+ state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */
+ } else {
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ }
+
+ /* MPCC pipe assignment and operational status */
+ if (pipe_ctx->stream_res.opp) {
+ state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ }
+
+ /* MPCC status indicators - active pipe state */
+ state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */
+ state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */
+
+ } else {
+ /* Pipe not active - set disabled/idle state for all fields */
+ state->mpc.mpcc_mode[i] = 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0;
+ state->mpc.mpcc_global_alpha[i] = 0;
+ state->mpc.mpcc_global_gain[i] = 0;
+ state->mpc.mpcc_bg_bpc[i] = 0;
+ state->mpc.mpcc_bot_gain_mode[i] = 0;
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ state->mpc.mpcc_idle[i] = 1; /* Idle */
+ state->mpc.mpcc_busy[i] = 0; /* Not busy */
+ }
+ }
+
+ /* Capture OPP programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.opp) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ /* OPP Pipe Control */
+ state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */
+
+ /* Display Pattern Generator (DPG) Control - 19 fields */
+ if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ state->opp[i].dpg_enable = 1;
+ } else {
+ /* Video mode - DPG disabled */
+ state->opp[i].dpg_enable = 0;
+ }
+
+ /* Format Control (FMT) - 18 fields */
+ state->opp[i].fmt_pixel_encoding = timing->pixel_encoding;
+
+ /* Chroma subsampling mode based on pixel encoding */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */
+ } else {
+ state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */
+ }
+
+ state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+
+ /* Dithering control based on bit depth */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */
+ state->opp[i].fmt_spatial_dither_enable = 1;
+ state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */
+ state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth;
+ state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */
+ } else {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 0;
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0;
+ state->opp[i].fmt_spatial_dither_enable = 0;
+ state->opp[i].fmt_spatial_dither_mode = 0;
+ state->opp[i].fmt_spatial_dither_depth = 0;
+ state->opp[i].fmt_temporal_dither_enable = 0;
+ }
+
+ /* Truncation control for bit depth reduction */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_truncate_enable = 1;
+ state->opp[i].fmt_truncate_depth = timing->display_color_depth;
+ state->opp[i].fmt_truncate_mode = 0; /* Round mode */
+ } else {
+ state->opp[i].fmt_truncate_enable = 0;
+ state->opp[i].fmt_truncate_depth = 0;
+ state->opp[i].fmt_truncate_mode = 0;
+ }
+
+ /* Data clamping control */
+ state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */
+ state->opp[i].fmt_clamp_color_format = timing->pixel_encoding;
+
+ /* Dynamic expansion for limited range content */
+ if (timing->pixel_encoding != PIXEL_ENCODING_RGB) {
+ state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */
+ state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */
+ } else {
+ state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */
+ state->opp[i].fmt_dynamic_exp_mode = 0;
+ }
+
+ /* Legacy field for compatibility */
+ state->opp[i].fmt_bit_depth_control = timing->display_color_depth;
+
+ /* Output Buffer (OPPBUF) Control - 6 fields */
+ state->opp[i].oppbuf_active_width = timing->h_addressable;
+ state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */
+
+ /* Multi-Stream Output (MSO) / ODM segmentation */
+ if (pipe_ctx->next_odm_pipe) {
+ state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */
+ state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */
+ } else {
+ state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */
+ state->opp[i].oppbuf_overlap_pixel_num = 0;
+ }
+
+ /* 3D/Stereo control */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) {
+ state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */
+ state->opp[i].oppbuf_3d_vact_space2_size = 30;
+ } else {
+ state->opp[i].oppbuf_3d_vact_space1_size = 0;
+ state->opp[i].oppbuf_3d_vact_space2_size = 0;
+ }
+
+ /* DSC Forward Config - 3 fields */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->opp[i].dscrm_dsc_forward_enable = 1;
+ state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */
+ } else {
+ state->opp[i].dscrm_dsc_forward_enable = 0;
+ state->opp[i].dscrm_dsc_opp_pipe_source = 0;
+ state->opp[i].dscrm_dsc_forward_enable_status = 0;
+ }
+ } else {
+ /* No OPP resource - set all fields to disabled state */
+ memset(&state->opp[i], 0, sizeof(state->opp[i]));
+ }
+ }
+
+ /* Capture OPTC programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst;
+
+ /* OTG_CONTROL register - 5 fields */
+ state->optc[i].otg_master_enable = 1; /* Active stream */
+ state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */
+ state->optc[i].otg_start_point_cntl = 0; /* Normal start */
+ state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0;
+ state->optc[i].otg_out_mux = 0; /* Direct output */
+
+ /* OTG Horizontal Timing - 7 fields */
+ state->optc[i].otg_h_total = timing->h_total;
+ state->optc[i].otg_h_blank_start = timing->h_addressable;
+ state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch;
+ state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch;
+ state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width;
+ state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ state->optc[i].otg_v_total = timing->v_total;
+ state->optc[i].otg_v_blank_start = timing->v_addressable;
+ state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch;
+ state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch;
+ state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width;
+ state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */
+
+ /* Initialize remaining core fields with appropriate defaults */
+ // TODO: Update logic for accurate vtotal min/max
+ state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */
+ state->optc[i].otg_v_total_min = timing->v_total - 50;
+ state->optc[i].otg_v_total_mid = timing->v_total;
+
+ /* ODM configuration */
+ // TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe
+ if (pipe_ctx->next_odm_pipe) {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0;
+ state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */
+ } else {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = 0;
+ state->optc[i].optc_num_of_input_segment = 0; /* Single segment */
+ }
+
+ /* DSC configuration */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->optc[i].optc_dsc_mode = 1; /* DSC enabled */
+				state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* bits_per_pixel is in 1/16 bpp units */
+ state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h;
+ } else {
+ state->optc[i].optc_dsc_mode = 0;
+ state->optc[i].optc_dsc_bytes_per_pixel = 0;
+ state->optc[i].optc_dsc_slice_width = 0;
+ }
+
+ /* Essential control fields */
+ state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+ state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0;
+ state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */
+ state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */
+
+ /* Initialize other key fields to defaults */
+ state->optc[i].optc_input_pix_clk_en = 1;
+ state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable;
+ state->optc[i].otg_vready_offset = 1;
+ state->optc[i].otg_vstartup_start = timing->v_addressable + 10;
+ state->optc[i].otg_vupdate_offset = 0;
+ state->optc[i].otg_vupdate_width = 5;
+ } else {
+ /* No timing generator resource - initialize all fields to 0 */
+ memset(&state->optc[i], 0, sizeof(state->optc[i]));
+ }
+ }
+
+ state->state_valid = true;
+ return true;
+}
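
For scale: struct dc_register_software_state carries per-pipe arrays for
HUBP/DPP/MPC/OPP/OPTC state, so a hypothetical debugfs-style consumer would
heap-allocate it rather than keep it on the stack. The fields printed below are
a small sample of what the function fills in:

	struct dc_register_software_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state && dc_capture_register_software_state(dc, state)) {
		DC_LOG_DEBUG("active pipes: %u, active streams: %u\n",
			     state->active_pipe_count, state->active_stream_count);
		DC_LOG_DEBUG("pipe0 otg: h_total %u v_total %u\n",
			     state->optc[0].otg_h_total, state->optc[0].otg_v_total);
	}
	kfree(state);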
+
+void dc_log_preos_dmcub_info(const struct dc *dc)
+{
+ dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index d82b1cb467f4..e2763b60482a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -32,6 +32,13 @@
#include "resource.h"
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
+#include "opp.h"
+#include "dsc.h"
+#include "dchubbub.h"
+#include "dccg.h"
+#include "abm.h"
+#include "dcn10/dcn10_hubbub.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
#define MAX_NUM_MCACHE 8
@@ -258,7 +265,7 @@ void color_space_to_black_color(
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
break;
- /**
+ /*
* Remove default and add case for all color space
* so when we forget to add new color space
* compiler will give a warning
@@ -755,11 +762,13 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required =
+ dc_state_is_fams2_in_use(dc, context) ||
+ dmub_hw_lock_mgr_does_link_require_lock(dc, stream->link);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
if (dc->hwss.pipe_control_lock) {
@@ -784,7 +793,7 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_mpc_pipe) {
if (current_mpc_pipe->plane_state) {
if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp;
block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
(*num_steps)++;
@@ -894,11 +903,11 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -911,6 +920,13 @@ void hwss_build_fast_sequence(struct dc *dc,
current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
!current_mpc_pipe->plane_state->skip_manual_trigger) {
+ if (dc->hwss.program_cursor_offload_now) {
+ block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc;
+ block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW;
+ (*num_steps)++;
+ }
+
block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe;
block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
(*num_steps)++;
@@ -942,8 +958,9 @@ void hwss_execute_sequence(struct dc *dc,
params->pipe_control_lock_params.lock);
break;
case HUBP_SET_FLIP_CONTROL_GSL:
- dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx,
- params->set_flip_control_gsl_params.flip_immediate);
+ params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ params->set_flip_control_gsl_params.hubp,
+ params->set_flip_control_gsl_params.flip_immediate);
break;
case HUBP_PROGRAM_TRIPLEBUFFER:
dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc,
@@ -1001,8 +1018,301 @@ void hwss_execute_sequence(struct dc *dc,
params->wait_for_dcc_meta_propagation_params.dc,
params->wait_for_dcc_meta_propagation_params.top_pipe_to_program);
break;
- case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
- dc->hwss.fams2_global_control_lock_fast(params);
+ case DMUB_HW_CONTROL_LOCK_FAST:
+ dc->hwss.dmub_hw_control_lock_fast(params);
+ break;
+ case HUBP_PROGRAM_SURFACE_CONFIG:
+ hwss_program_surface_config(params);
+ break;
+ case HUBP_PROGRAM_MCACHE_ID:
+ hwss_program_mcache_id_and_split_coordinate(params);
+ break;
+ case PROGRAM_CURSOR_UPDATE_NOW:
+ dc->hwss.program_cursor_offload_now(
+ params->program_cursor_update_now_params.dc,
+ params->program_cursor_update_now_params.pipe_ctx);
+ break;
+ case HUBP_WAIT_PIPE_READ_START:
+ params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start(
+ params->hubp_wait_pipe_read_start_params.hubp);
+ break;
+ case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM:
+ dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx);
+ break;
+ case HWS_UPDATE_PHANTOM_VP_POSITION:
+ dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc,
+ params->update_phantom_vp_position_params.context,
+ params->update_phantom_vp_position_params.pipe_ctx);
+ break;
+ case OPTC_SET_ODM_COMBINE:
+ hwss_set_odm_combine(params);
+ break;
+ case OPTC_SET_ODM_BYPASS:
+ hwss_set_odm_bypass(params);
+ break;
+ case OPP_PIPE_CLOCK_CONTROL:
+ hwss_opp_pipe_clock_control(params);
+ break;
+ case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL:
+ hwss_opp_program_left_edge_extra_pixel(params);
+ break;
+ case DCCG_SET_DTO_DSCCLK:
+ hwss_dccg_set_dto_dscclk(params);
+ break;
+ case DSC_SET_CONFIG:
+ hwss_dsc_set_config(params);
+ break;
+ case DSC_ENABLE:
+ hwss_dsc_enable(params);
+ break;
+ case TG_SET_DSC_CONFIG:
+ hwss_tg_set_dsc_config(params);
+ break;
+ case DSC_DISCONNECT:
+ hwss_dsc_disconnect(params);
+ break;
+ case DSC_READ_STATE:
+ hwss_dsc_read_state(params);
+ break;
+ case DSC_CALCULATE_AND_SET_CONFIG:
+ hwss_dsc_calculate_and_set_config(params);
+ break;
+ case DSC_ENABLE_WITH_OPP:
+ hwss_dsc_enable_with_opp(params);
+ break;
+ case TG_PROGRAM_GLOBAL_SYNC:
+ hwss_tg_program_global_sync(params);
+ break;
+ case TG_WAIT_FOR_STATE:
+ hwss_tg_wait_for_state(params);
+ break;
+ case TG_SET_VTG_PARAMS:
+ hwss_tg_set_vtg_params(params);
+ break;
+ case TG_SETUP_VERTICAL_INTERRUPT2:
+ hwss_tg_setup_vertical_interrupt2(params);
+ break;
+ case DPP_SET_HDR_MULTIPLIER:
+ hwss_dpp_set_hdr_multiplier(params);
+ break;
+ case HUBP_PROGRAM_DET_SIZE:
+ hwss_program_det_size(params);
+ break;
+ case HUBP_PROGRAM_DET_SEGMENTS:
+ hwss_program_det_segments(params);
+ break;
+ case OPP_SET_DYN_EXPANSION:
+ hwss_opp_set_dyn_expansion(params);
+ break;
+ case OPP_PROGRAM_FMT:
+ hwss_opp_program_fmt(params);
+ break;
+ case OPP_PROGRAM_BIT_DEPTH_REDUCTION:
+ hwss_opp_program_bit_depth_reduction(params);
+ break;
+ case OPP_SET_DISP_PATTERN_GENERATOR:
+ hwss_opp_set_disp_pattern_generator(params);
+ break;
+ case ABM_SET_PIPE:
+ hwss_set_abm_pipe(params);
+ break;
+ case ABM_SET_LEVEL:
+ hwss_set_abm_level(params);
+ break;
+ case ABM_SET_IMMEDIATE_DISABLE:
+ hwss_set_abm_immediate_disable(params);
+ break;
+ case MPC_REMOVE_MPCC:
+ hwss_mpc_remove_mpcc(params);
+ break;
+ case OPP_SET_MPCC_DISCONNECT_PENDING:
+ hwss_opp_set_mpcc_disconnect_pending(params);
+ break;
+ case DC_SET_OPTIMIZED_REQUIRED:
+ hwss_dc_set_optimized_required(params);
+ break;
+ case HUBP_DISCONNECT:
+ hwss_hubp_disconnect(params);
+ break;
+ case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL:
+ hwss_hubbub_force_pstate_change_control(params);
+ break;
+ case TG_ENABLE_CRTC:
+ hwss_tg_enable_crtc(params);
+ break;
+ case TG_SET_GSL:
+ hwss_tg_set_gsl(params);
+ break;
+ case TG_SET_GSL_SOURCE_SELECT:
+ hwss_tg_set_gsl_source_select(params);
+ break;
+ case HUBP_WAIT_FLIP_PENDING:
+ hwss_hubp_wait_flip_pending(params);
+ break;
+ case TG_WAIT_DOUBLE_BUFFER_PENDING:
+ hwss_tg_wait_double_buffer_pending(params);
+ break;
+ case UPDATE_FORCE_PSTATE:
+ hwss_update_force_pstate(params);
+ break;
+ case HUBBUB_APPLY_DEDCN21_147_WA:
+ hwss_hubbub_apply_dedcn21_147_wa(params);
+ break;
+ case HUBBUB_ALLOW_SELF_REFRESH_CONTROL:
+ hwss_hubbub_allow_self_refresh_control(params);
+ break;
+ case TG_GET_FRAME_COUNT:
+ hwss_tg_get_frame_count(params);
+ break;
+ case MPC_SET_DWB_MUX:
+ hwss_mpc_set_dwb_mux(params);
+ break;
+ case MPC_DISABLE_DWB_MUX:
+ hwss_mpc_disable_dwb_mux(params);
+ break;
+ case MCIF_WB_CONFIG_BUF:
+ hwss_mcif_wb_config_buf(params);
+ break;
+ case MCIF_WB_CONFIG_ARB:
+ hwss_mcif_wb_config_arb(params);
+ break;
+ case MCIF_WB_ENABLE:
+ hwss_mcif_wb_enable(params);
+ break;
+ case MCIF_WB_DISABLE:
+ hwss_mcif_wb_disable(params);
+ break;
+ case DWBC_ENABLE:
+ hwss_dwbc_enable(params);
+ break;
+ case DWBC_DISABLE:
+ hwss_dwbc_disable(params);
+ break;
+ case DWBC_UPDATE:
+ hwss_dwbc_update(params);
+ break;
+ case HUBP_UPDATE_MALL_SEL:
+ hwss_hubp_update_mall_sel(params);
+ break;
+ case HUBP_PREPARE_SUBVP_BUFFERING:
+ hwss_hubp_prepare_subvp_buffering(params);
+ break;
+ case HUBP_SET_BLANK_EN:
+ hwss_hubp_set_blank_en(params);
+ break;
+ case HUBP_DISABLE_CONTROL:
+ hwss_hubp_disable_control(params);
+ break;
+ case HUBBUB_SOFT_RESET:
+ hwss_hubbub_soft_reset(params);
+ break;
+ case HUBP_CLK_CNTL:
+ hwss_hubp_clk_cntl(params);
+ break;
+ case HUBP_INIT:
+ hwss_hubp_init(params);
+ break;
+ case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS:
+ hwss_hubp_set_vm_system_aperture_settings(params);
+ break;
+ case HUBP_SET_FLIP_INT:
+ hwss_hubp_set_flip_int(params);
+ break;
+ case DPP_DPPCLK_CONTROL:
+ hwss_dpp_dppclk_control(params);
+ break;
+ case DISABLE_PHANTOM_CRTC:
+ hwss_disable_phantom_crtc(params);
+ break;
+ case DSC_PG_STATUS:
+ hwss_dsc_pg_status(params);
+ break;
+ case DSC_WAIT_DISCONNECT_PENDING_CLEAR:
+ hwss_dsc_wait_disconnect_pending_clear(params);
+ break;
+ case DSC_DISABLE:
+ hwss_dsc_disable(params);
+ break;
+ case DCCG_SET_REF_DSCCLK:
+ hwss_dccg_set_ref_dscclk(params);
+ break;
+ case DPP_PG_CONTROL:
+ hwss_dpp_pg_control(params);
+ break;
+ case HUBP_PG_CONTROL:
+ hwss_hubp_pg_control(params);
+ break;
+ case HUBP_RESET:
+ hwss_hubp_reset(params);
+ break;
+ case DPP_RESET:
+ hwss_dpp_reset(params);
+ break;
+ case DPP_ROOT_CLOCK_CONTROL:
+ hwss_dpp_root_clock_control(params);
+ break;
+ case DC_IP_REQUEST_CNTL:
+ hwss_dc_ip_request_cntl(params);
+ break;
+ case DCCG_UPDATE_DPP_DTO:
+ hwss_dccg_update_dpp_dto(params);
+ break;
+ case HUBP_VTG_SEL:
+ hwss_hubp_vtg_sel(params);
+ break;
+ case HUBP_SETUP2:
+ hwss_hubp_setup2(params);
+ break;
+ case HUBP_SETUP:
+ hwss_hubp_setup(params);
+ break;
+ case HUBP_SET_UNBOUNDED_REQUESTING:
+ hwss_hubp_set_unbounded_requesting(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT2:
+ hwss_hubp_setup_interdependent2(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT:
+ hwss_hubp_setup_interdependent(params);
+ break;
+ case DPP_SET_CURSOR_MATRIX:
+ hwss_dpp_set_cursor_matrix(params);
+ break;
+ case MPC_UPDATE_BLENDING:
+ hwss_mpc_update_blending(params);
+ break;
+ case MPC_ASSERT_IDLE_MPCC:
+ hwss_mpc_assert_idle_mpcc(params);
+ break;
+ case MPC_INSERT_PLANE:
+ hwss_mpc_insert_plane(params);
+ break;
+ case DPP_SET_SCALER:
+ hwss_dpp_set_scaler(params);
+ break;
+ case HUBP_MEM_PROGRAM_VIEWPORT:
+ hwss_hubp_mem_program_viewport(params);
+ break;
+ case ABORT_CURSOR_OFFLOAD_UPDATE:
+ hwss_abort_cursor_offload_update(params);
+ break;
+ case SET_CURSOR_ATTRIBUTE:
+ hwss_set_cursor_attribute(params);
+ break;
+ case SET_CURSOR_POSITION:
+ hwss_set_cursor_position(params);
+ break;
+ case SET_CURSOR_SDR_WHITE_LEVEL:
+ hwss_set_cursor_sdr_white_level(params);
+ break;
+ case PROGRAM_OUTPUT_CSC:
+ hwss_program_output_csc(params);
+ break;
+ case HUBP_SET_BLANK:
+ hwss_hubp_set_blank(params);
+ break;
+ case PHANTOM_HUBP_POST_ENABLE:
+ hwss_phantom_hubp_post_enable(params);
break;
default:
ASSERT(false);
@@ -1011,6 +1321,338 @@ void hwss_execute_sequence(struct dc *dc,
}
}
+/*
+ * Helper function to add OPTC pipe control lock to block sequence
+ */
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool lock)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*seq_state->num_steps)++;
+ }
+}
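
Every hwss_add_* helper that follows repeats the same guarded append:
bounds-check *num_steps, fill in the params union, set func, increment. A
hypothetical macro (BLOCK_SEQUENCE_APPEND is not an existing DC symbol) could
factor that boilerplate; the expanded form is kept in this patch, which does
keep each helper independently greppable:

	/* hypothetical consolidation; element type is taken via typeof */
	#define BLOCK_SEQUENCE_APPEND(seq_state, step_func, fill)		\
		do {								\
			if (*(seq_state)->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { \
				typeof(&(seq_state)->steps[0]) __step =		\
					&(seq_state)->steps[*(seq_state)->num_steps]; \
				fill;						\
				__step->func = (step_func);			\
				(*(seq_state)->num_steps)++;			\
			}							\
		} while (0)

	/* usage sketch, equivalent to hwss_add_optc_program_manual_trigger() */
	BLOCK_SEQUENCE_APPEND(seq_state, OPTC_PROGRAM_MANUAL_TRIGGER,
		__step->params.program_manual_trigger_params.pipe_ctx = pipe_ctx);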
+
+/*
+ * Helper function to add HUBP set flip control GSL to block sequence
+ */
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool flip_immediate)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program triplebuffer to block sequence
+ */
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enableTripleBuffer)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP update plane address to block sequence
+ */
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set input transfer function to block sequence
+ */
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program gamut remap to block sequence
+ */
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program bias and scale to block sequence
+ */
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC program manual trigger to block sequence
+ */
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set output transfer function to block sequence
+ */
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update visual confirm to block sequence
+ */
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC power on MPC mem PWR to block sequence
+ */
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on;
+ seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set output CSC to block sequence
+ */
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set OCSC default to block sequence
+ */
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space colorspace,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB send DMCUB command to block sequence
+ */
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD;
+ (*seq_state->num_steps)++;
+ }
+}
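
One lifetime caveat with this helper: it stores the cmd pointer, not a copy, so
the union dmub_rb_cmd must remain valid until the sequence is executed. A
sketch of the safe shape (the hwss_execute_sequence arguments and the
block_sequence_state field layout are assumed from the surrounding code):

	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	/* ... fill in cmd ... */
	hwss_add_dmub_send_dmcub_cmd(&seq_state, dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	/* cmd must still be in scope when the step runs */
	hwss_execute_sequence(dc, seq_state.steps, *seq_state.num_steps);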
+
+/*
+ * Helper function to add DMUB SubVP save surface address to block sequence
+ */
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct dc_plane_address *addr,
+ uint8_t subvp_index)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait for DCC meta propagation to block sequence
+ */
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *top_pipe_to_program)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait pipe read start to block sequence
+ */
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS apply update flags for phantom to block sequence
+ */
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS update phantom VP position to block sequence
+ */
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM combine to block sequence
+ */
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg;
+ memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES);
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM bypass to block sequence
+ */
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS;
+ (*seq_state->num_steps)++;
+ }
+}
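
Taken together, these builders let a caller express a locked flip as data
rather than as inline hardware calls. A minimal hypothetical sequence follows;
struct block_sequence_state is assumed to expose the steps/num_steps fields the
helpers dereference, and in real callers the steps array lives in a long-lived
context rather than on the stack:

	struct block_sequence steps[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
	unsigned int num_steps = 0;
	struct block_sequence_state seq_state = {
		.steps = steps,		/* assumed field names */
		.num_steps = &num_steps,
	};

	hwss_add_optc_pipe_control_lock(&seq_state, dc, pipe_ctx, true);
	hwss_add_hubp_update_plane_addr(&seq_state, dc, pipe_ctx);
	hwss_add_optc_pipe_control_lock(&seq_state, dc, pipe_ctx, false);

	hwss_execute_sequence(dc, steps, num_steps);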
+
void hwss_send_dmcub_cmd(union block_sequence_params *params)
{
struct dc_context *ctx = params->send_dmcub_cmd_params.ctx;
@@ -1020,6 +1662,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
}
+/*
+ * Helper function to add TG program global sync to block sequence
+ */
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines;
+ seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait for state to block sequence
+ */
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ enum crtc_state state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG set VTG params to block sequence
+ */
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dc_crtc_timing *dc_crtc_timing,
+ bool program_fp2)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2;
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG setup vertical interrupt2 to block sequence
+ */
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line;
+ seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set HDR multiplier to block sequence
+ */
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET size to block sequence
+ */
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_buffer_size_kb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SIZE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program mcache ID and split coordinate to block sequence
+ */
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBBUB force pstate change control to block sequence
+ */
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool enable,
+ bool wait)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait;
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET segments to block sequence
+ */
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_size)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP set dynamic expansion to block sequence
+ */
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_space,
+ enum dc_color_depth color_depth,
+ enum signal_type signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal;
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP program FMT to block sequence
+ */
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping;
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP program left edge extra pixel to block sequence
+ */
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set pipe to block sequence
+ */
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set level to block sequence
+ */
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm,
+ uint32_t abm_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG enable CRTC to block sequence
+ */
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait flip pending to block sequence
+ */
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait double buffer pending to block sequence
+ */
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
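+
+/*
+ * Usage sketch for the wait helpers (values are illustrative, not defaults
+ * from this patch): timeout_us bounds the total wait and polling_interval_us
+ * sets the poll granularity, so the executor checks the pending flag at most
+ * timeout_us / polling_interval_us times. For example:
+ *
+ *	hwss_add_hubp_wait_flip_pending(&seq_state, hubp, 100000, 20);
+ *
+ * polls up to 5000 times, 20 us apart.
+ */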
+
void hwss_program_manual_trigger(union block_sequence_params *params)
{
struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx;
@@ -1046,12 +1958,6 @@ void hwss_setup_dpp(union block_sequence_params *params)
plane_state->color_space,
NULL);
}
-
- if (dpp && dpp->funcs->set_cursor_matrix) {
- dpp->funcs->set_cursor_matrix(dpp,
- plane_state->color_space,
- plane_state->cursor_csc_color_matrix);
- }
}

 void hwss_program_bias_and_scale(union block_sequence_params *params)
@@ -1062,9 +1968,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;

 	//TODO :for CNVC set scale and bias registers if necessary
- if (dpp->funcs->dpp_program_bias_and_scale) {
+ if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
- }
}

 void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -1114,6 +2019,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}

+void hwss_program_surface_config(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_surface_config_params.hubp;
+ enum surface_pixel_format format = params->program_surface_config_params.format;
+ struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info;
+ struct plane_size size = params->program_surface_config_params.plane_size;
+ enum dc_rotation_angle rotation = params->program_surface_config_params.rotation;
+ struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc;
+ bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror;
+ int compat_level = params->program_surface_config_params.compat_level;
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ format,
+ tiling_info,
+ &size,
+ rotation,
+ dcc,
+ horizontal_mirror,
+ compat_level);
+
+ hubp->power_gated = false;
+}
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs;
+
+	if (hubp && hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+		hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs);
+}
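+
+/*
+ * Dispatch sketch (assumption, for illustration): the sequence executor maps
+ * each queued function ID back to one of these wrappers, roughly
+ *
+ *	case HUBP_PROGRAM_MCACHE_ID:
+ *		hwss_program_mcache_id_and_split_coordinate(&block_sequence[i].params);
+ *		break;
+ *
+ * so the union member read here must match the member the add-helper wrote.
+ */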
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -1188,6 +2126,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
+
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1264,3 +2203,1869 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont
if (dc->hwss.program_outstanding_updates)
dc->hwss.program_outstanding_updates(dc, dc_context);
}
+
+void hwss_set_odm_combine(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_combine_params.tg;
+ int *opp_inst = params->set_odm_combine_params.opp_inst;
+ int opp_head_count = params->set_odm_combine_params.opp_head_count;
+ int odm_slice_width = params->set_odm_combine_params.odm_slice_width;
+ int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width;
+
+ if (tg && tg->funcs->set_odm_combine)
+ tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count,
+ odm_slice_width, last_odm_slice_width);
+}
+
+void hwss_set_odm_bypass(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_bypass_params.tg;
+ const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing;
+
+ if (tg && tg->funcs->set_odm_bypass)
+ tg->funcs->set_odm_bypass(tg, timing);
+}
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp;
+ bool enable = params->opp_pipe_clock_control_params.enable;
+
+ if (opp && opp->funcs->opp_pipe_clock_control)
+ opp->funcs->opp_pipe_clock_control(opp, enable);
+}
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp;
+ enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding;
+ bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master;
+
+ if (opp && opp->funcs->opp_program_left_edge_extra_pixel)
+ opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master);
+}
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg;
+ int inst = params->dccg_set_dto_dscclk_params.inst;
+ int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h;
+
+ if (dccg && dccg->funcs->set_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h);
+}
+
+void hwss_dsc_set_config(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc;
+ struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg;
+
+ if (dsc && dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg);
+}
+
+void hwss_dsc_enable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_enable_params.dsc;
+ int opp_inst = params->dsc_enable_params.opp_inst;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, opp_inst);
+}
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_dsc_config_params.tg;
+ enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED;
+ uint32_t bytes_per_pixel = 0;
+ uint32_t slice_width = 0;
+
+ if (params->tg_set_dsc_config_params.enable) {
+ struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg;
+
+ if (dsc_optc_cfg) {
+ bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel;
+ slice_width = dsc_optc_cfg->slice_width;
+ optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ?
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+ }
+ }
+
+ if (tg && tg->funcs->set_dsc_config)
+ tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width);
+}
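+
+/*
+ * For reference, the OPTC DSC mode chosen above reduces to:
+ *
+ *	enable	is_pixel_format_444	optc_dsc_mode
+ *	false	don't care		OPTC_DSC_DISABLED
+ *	true	true			OPTC_DSC_ENABLED_444
+ *	true	false			OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED
+ *
+ * with bytes_per_pixel and slice_width taken from dsc_optc_cfg when enabled
+ * and left at zero otherwise.
+ */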
+
+void hwss_dsc_disconnect(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc;
+
+ if (dsc && dsc->funcs->dsc_disconnect)
+ dsc->funcs->dsc_disconnect(dsc);
+}
+
+void hwss_dsc_read_state(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc;
+ struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state;
+
+ if (dsc && dsc->funcs->dsc_read_state)
+ dsc->funcs->dsc_read_state(dsc, dsc_state);
+}
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params)
+{
+	struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx;
+	struct pipe_ctx *top_pipe = pipe_ctx;
+	bool enable = params->dsc_calculate_and_set_config_params.enable;
+	int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt;
+	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dsc_config dsc_cfg;
+
+	if (!dsc || !enable)
+		return;
+
+	/* Calculate the DSC configuration (extracted from dcn32_update_dsc_on_stream) */
+ while (top_pipe->prev_odm_pipe)
+ top_pipe = top_pipe->prev_odm_pipe;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = top_pipe->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding;
+
+ /* Set DSC configuration */
+ if (dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg,
+ &params->dsc_calculate_and_set_config_params.dsc_optc_cfg);
+}
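+
+/*
+ * Worked example (numbers are hypothetical): with two ODM slices
+ * (opp_cnt = 2), h_addressable = 3840 and no borders or DSC padding, each
+ * DSC instance gets pic_width = 3840 / 2 = 1920, and num_slices_h is halved
+ * so each instance owns its share of the horizontal slices.
+ */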
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx;
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+}
+
+void hwss_tg_program_global_sync(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_program_global_sync_params.tg;
+ int vready_offset = params->tg_program_global_sync_params.vready_offset;
+ unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines;
+ unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines;
+
+ if (tg->funcs->program_global_sync) {
+ tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines,
+ vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines);
+ }
+}
+
+void hwss_tg_wait_for_state(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_for_state_params.tg;
+ enum crtc_state state = params->tg_wait_for_state_params.state;
+
+ if (tg->funcs->wait_for_state)
+ tg->funcs->wait_for_state(tg, state);
+}
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_vtg_params_params.tg;
+ struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing;
+ bool program_fp2 = params->tg_set_vtg_params_params.program_fp2;
+
+ if (tg->funcs->set_vtg_params)
+ tg->funcs->set_vtg_params(tg, timing, program_fp2);
+}
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg;
+ int start_line = params->tg_setup_vertical_interrupt2_params.start_line;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp;
+ uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult;
+
+ if (dpp->funcs->dpp_set_hdr_multiplier)
+ dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult);
+}
+
+void hwss_program_det_size(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_size_params.hubbub;
+ unsigned int hubp_inst = params->program_det_size_params.hubp_inst;
+ unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb;
+
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb);
+}
+
+void hwss_program_det_segments(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_segments_params.hubbub;
+ unsigned int hubp_inst = params->program_det_segments_params.hubp_inst;
+ unsigned int det_size = params->program_det_segments_params.det_size;
+
+ if (hubbub->funcs->program_det_segments)
+ hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size);
+}
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp;
+ enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth;
+ enum signal_type signal = params->opp_set_dyn_expansion_params.signal;
+
+ if (opp->funcs->opp_set_dyn_expansion)
+ opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal);
+}
+
+void hwss_opp_program_fmt(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_fmt_params.opp;
+ struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping;
+
+ if (opp->funcs->opp_program_fmt)
+ opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping);
+}
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp;
+ bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params;
+ struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx;
+ struct bit_depth_reduction_params bit_depth_params;
+
+ if (use_default_params)
+ memset(&bit_depth_params, 0, sizeof(bit_depth_params));
+ else
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params);
+
+ if (opp->funcs->opp_program_bit_depth_reduction)
+ opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params);
+}
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp;
+ enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern;
+ enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth;
+ struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ?
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL;
+ int width = params->opp_set_disp_pattern_generator_params.width;
+ int height = params->opp_set_disp_pattern_generator_params.height;
+ int offset = params->opp_set_disp_pattern_generator_params.offset;
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator) {
+ opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
+}
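+
+/*
+ * Note: the step stores the solid color by value, while the OPP hook takes
+ * an optional pointer; use_solid_color selects between passing the stored
+ * color and passing NULL.
+ */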
+
+void hwss_set_abm_pipe(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_pipe_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx;
+
+ dc->hwss.set_pipe(pipe_ctx);
+}
+
+void hwss_set_abm_level(union block_sequence_params *params)
+{
+ struct abm *abm = params->set_abm_level_params.abm;
+ unsigned int abm_level = params->set_abm_level_params.abm_level;
+
+ if (abm->funcs->set_abm_level)
+ abm->funcs->set_abm_level(abm, abm_level);
+}
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_immediate_disable_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_abm_immediate_disable)
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+}
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_remove_mpcc_params.mpc;
+ struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params;
+ struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove;
+
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+}
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp;
+ int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst;
+ bool pending = params->opp_set_mpcc_disconnect_pending_params.pending;
+
+ opp->mpcc_disconnect_pending[mpcc_inst] = pending;
+}
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_set_optimized_required_params.dc;
+ bool optimized_required = params->dc_set_optimized_required_params.optimized_required;
+
+ dc->optimized_required = optimized_required;
+}
+
+void hwss_hubp_disconnect(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disconnect_params.hubp;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+}
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub;
+ bool enable = params->hubbub_force_pstate_change_control_params.enable;
+ bool wait = params->hubbub_force_pstate_change_control_params.wait;
+
+ if (hubbub->funcs->force_pstate_change_control) {
+ hubbub->funcs->force_pstate_change_control(hubbub, enable, wait);
+ /* Add delay when enabling pstate change control */
+ if (enable)
+ udelay(500);
+ }
+}
+
+void hwss_tg_enable_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_enable_crtc_params.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+}
+
+void hwss_tg_set_gsl(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_params.tg;
+ struct gsl_params *gsl = &params->tg_set_gsl_params.gsl;
+
+ if (tg->funcs->set_gsl)
+ tg->funcs->set_gsl(tg, gsl);
+}
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg;
+ int group_idx = params->tg_set_gsl_source_select_params.group_idx;
+ uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal;
+
+ if (tg->funcs->set_gsl_source_select)
+ tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal);
+}
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp;
+ unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us;
+ int j = 0;
+
+	if (hubp && hubp->funcs->hubp_is_flip_pending) {
+		for (j = 0; j < timeout_us / polling_interval_us
+				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
+			udelay(polling_interval_us);
+	}
+}
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg;
+ unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_update_force_pstate(union block_sequence_params *params)
+{
+ struct dc *dc = params->update_force_pstate_params.dc;
+ struct dc_state *context = params->update_force_pstate_params.context;
+ struct dce_hwseq *hwseq = dc->hwseq;
+
+ if (hwseq->funcs.update_force_pstate)
+ hwseq->funcs.update_force_pstate(dc, context);
+}
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub;
+
+ hubbub->funcs->apply_DEDCN21_147_wa(hubbub);
+}
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub;
+ bool allow = params->hubbub_allow_self_refresh_control_params.allow;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, allow);
+
+ if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied)
+ *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true;
+}
+
+void hwss_tg_get_frame_count(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_get_frame_count_params.tg;
+ unsigned int *frame_count = params->tg_get_frame_count_params.frame_count;
+
+ *frame_count = tg->funcs->get_frame_count(tg);
+}
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc;
+ int dwb_id = params->mpc_set_dwb_mux_params.dwb_id;
+ int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id;
+
+ if (mpc->funcs->set_dwb_mux)
+ mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id);
+}
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc;
+ unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id;
+
+ if (mpc->funcs->disable_dwb_mux)
+ mpc->funcs->disable_dwb_mux(mpc, dwb_id);
+}
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb;
+ struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params;
+ unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height;
+
+ if (mcif_wb->funcs->config_mcif_buf)
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height);
+}
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb;
+ struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params;
+
+ if (mcif_wb->funcs->config_mcif_arb)
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params);
+}
+
+void hwss_mcif_wb_enable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb;
+
+ if (mcif_wb->funcs->enable_mcif)
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+}
+
+void hwss_mcif_wb_disable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb;
+
+ if (mcif_wb->funcs->disable_mcif)
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+}
+
+void hwss_dwbc_enable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_enable_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params;
+
+ if (dwb->funcs->enable)
+ dwb->funcs->enable(dwb, dwb_params);
+}
+
+void hwss_dwbc_disable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_disable_params.dwb;
+
+ if (dwb->funcs->disable)
+ dwb->funcs->disable(dwb);
+}
+
+void hwss_dwbc_update(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_update_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params;
+
+ if (dwb->funcs->update)
+ dwb->funcs->update(dwb, dwb_params);
+}
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_update_mall_sel_params.hubp;
+ uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel;
+ bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor;
+
+ if (hubp && hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor);
+}
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp;
+ bool enable = params->hubp_prepare_subvp_buffering_params.enable;
+
+ if (hubp && hubp->funcs->hubp_prepare_subvp_buffering)
+ hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable);
+}
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_en_params.hubp;
+ bool enable = params->hubp_set_blank_en_params.enable;
+
+ if (hubp && hubp->funcs->set_hubp_blank_en)
+ hubp->funcs->set_hubp_blank_en(hubp, enable);
+}
+
+void hwss_hubp_disable_control(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disable_control_params.hubp;
+ bool disable = params->hubp_disable_control_params.disable;
+
+ if (hubp && hubp->funcs->hubp_disable_control)
+ hubp->funcs->hubp_disable_control(hubp, disable);
+}
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub;
+ bool reset = params->hubbub_soft_reset_params.reset;
+
+	if (hubbub && params->hubbub_soft_reset_params.hubbub_soft_reset)
+		params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset);
+}
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_clk_cntl_params.hubp;
+ bool enable = params->hubp_clk_cntl_params.enable;
+
+ if (hubp && hubp->funcs->hubp_clk_cntl) {
+ hubp->funcs->hubp_clk_cntl(hubp, enable);
+ hubp->power_gated = !enable;
+ }
+}
+
+void hwss_hubp_init(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_init_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_init)
+ hubp->funcs->hubp_init(hubp);
+}
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp;
+ struct vm_system_aperture_param apt;
+
+ apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default;
+ apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high;
+ apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low;
+
+ if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings)
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+}
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_flip_int_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_set_flip_int)
+ hubp->funcs->hubp_set_flip_int(hubp);
+}
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_dppclk_control_params.dpp;
+ bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div;
+ bool enable = params->dpp_dppclk_control_params.enable;
+
+ if (dpp && dpp->funcs->dpp_dppclk_control)
+ dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable);
+}
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->disable_phantom_crtc_params.tg;
+
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+}
+
+void hwss_dsc_pg_status(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dsc_pg_status_params.hws;
+ int dsc_inst = params->dsc_pg_status_params.dsc_inst;
+
+ if (hws && hws->funcs.dsc_pg_status)
+ params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst);
+}
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc;
+
+	if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated ||
+	    !*params->dsc_wait_disconnect_pending_clear_params.is_ungated)
+		return;
+
+ if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear)
+ dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+}
+
+void hwss_dsc_disable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disable_params.dsc;
+
+	if (!params->dsc_disable_params.is_ungated ||
+	    !*params->dsc_disable_params.is_ungated)
+		return;
+
+ if (dsc && dsc->funcs->dsc_disable)
+ dsc->funcs->dsc_disable(dsc);
+}
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg;
+ int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst;
+
+	if (!params->dccg_set_ref_dscclk_params.is_ungated ||
+	    !*params->dccg_set_ref_dscclk_params.is_ungated)
+		return;
+
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc_inst);
+}
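+
+/*
+ * The three executors above share a gating pattern: a DSC_PG_STATUS step is
+ * expected to run first and publish its result, and each later step bails
+ * out unless is_ungated points at a true value. A sketch of the queued order
+ * (the add-helper names here are assumptions for illustration):
+ *
+ *	hwss_add_dsc_pg_status(...);			// writes is_ungated
+ *	hwss_add_dsc_wait_disconnect_pending_clear(...);
+ *	hwss_add_dsc_disable(...);
+ *	hwss_add_dccg_set_ref_dscclk(...);		// all read *is_ungated
+ */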
+
+void hwss_dpp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_pg_control_params.hws;
+ unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst;
+ bool power_on = params->dpp_pg_control_params.power_on;
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp_inst, power_on);
+}
+
+void hwss_hubp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->hubp_pg_control_params.hws;
+ unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst;
+ bool power_on = params->hubp_pg_control_params.power_on;
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp_inst, power_on);
+}
+
+void hwss_hubp_reset(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_reset_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_reset)
+ hubp->funcs->hubp_reset(hubp);
+}
+
+void hwss_dpp_reset(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_reset_params.dpp;
+
+ if (dpp && dpp->funcs->dpp_reset)
+ dpp->funcs->dpp_reset(dpp);
+}
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws;
+ unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst;
+ bool clock_on = params->dpp_root_clock_control_params.clock_on;
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on);
+}
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_ip_request_cntl_params.dc;
+ bool enable = params->dc_ip_request_cntl_params.enable;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.dc_ip_request_cntl)
+ hws->funcs.dc_ip_request_cntl(dc, enable);
+}
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg;
+ int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst;
+ int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz;
+
+ if (dccg && dccg->funcs->update_dpp_dto)
+ dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz);
+}
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_vtg_sel_params.hubp;
+ uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst;
+
+ if (hubp && hubp->funcs->hubp_vtg_sel)
+ hubp->funcs->hubp_vtg_sel(hubp, otg_inst);
+}
+
+void hwss_hubp_setup2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs;
+ union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync;
+ struct dc_crtc_timing *timing = params->hubp_setup2_params.timing;
+
+ if (hubp && hubp->funcs->hubp_setup2)
+ hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing);
+}
+
+void hwss_hubp_setup(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest;
+
+ if (hubp && hubp->funcs->hubp_setup)
+ hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest);
+}
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp;
+ bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req;
+
+ if (hubp && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, unbounded_req);
+}
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent2)
+ hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs);
+}
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent)
+ hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs);
+}
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp;
+ enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix;
+
+ if (dpp && dpp->funcs->set_cursor_matrix)
+ dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix);
+}
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params)
+{
+ struct dc *dc = params->mpc_update_mpcc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.update_mpcc)
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+}
+
+void hwss_mpc_update_blending(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_update_blending_params.mpc;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg;
+ int mpcc_id = params->mpc_update_blending_params.mpcc_id;
+
+ if (mpc && mpc->funcs->update_blending)
+ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
+}
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc;
+ int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id;
+
+ if (mpc && mpc->funcs->wait_for_idle)
+ mpc->funcs->wait_for_idle(mpc, mpcc_id);
+}
+
+void hwss_mpc_insert_plane(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_insert_plane_params.mpc;
+ struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg;
+ struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc;
+ int mpcc_id = params->mpc_insert_plane_params.mpcc_id;
+ int dpp_id = params->mpc_insert_plane_params.dpp_id;
+
+ if (mpc && mpc->funcs->insert_plane)
+ mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc,
+ dpp_id, mpcc_id);
+}
+
+void hwss_dpp_set_scaler(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_scaler_params.dpp;
+ const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data;
+
+ if (dpp && dpp->funcs->dpp_set_scaler)
+ dpp->funcs->dpp_set_scaler(dpp, scl_data);
+}
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp;
+ const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport;
+ const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c;
+
+ if (hubp && hubp->funcs->mem_program_viewport)
+ hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
+}
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params)
+{
+ struct dc *dc = params->abort_cursor_offload_update_params.dc;
+ struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx;
+
+ if (dc && dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+}
+
+void hwss_set_cursor_attribute(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_attribute_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_attribute)
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+}
+
+void hwss_set_cursor_position(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_position_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_position)
+ dc->hwss.set_cursor_position(pipe_ctx);
+}
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_sdr_white_level_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+}
+
+void hwss_program_output_csc(union block_sequence_params *params)
+{
+ struct dc *dc = params->program_output_csc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx;
+ enum dc_color_space colorspace = params->program_output_csc_params.colorspace;
+ uint16_t *matrix = params->program_output_csc_params.matrix;
+ int opp_id = params->program_output_csc_params.opp_id;
+
+ if (dc && dc->hwss.program_output_csc)
+ dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id);
+}
+
+void hwss_hubp_set_blank(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_params.hubp;
+ bool blank = params->hubp_set_blank_params.blank;
+
+ if (hubp && hubp->funcs->set_blank)
+ hubp->funcs->set_blank(hubp, blank);
+}
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp;
+
+ if (hubp && hubp->funcs->phantom_hubp_post_enable)
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+}
+
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP;
+ seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = optimized_required;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset;
+ (*seq_state->num_steps)++;
+ }
+}
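+
+/*
+ * Usage sketch (values are illustrative): blanking a pipe with a solid black
+ * pattern could be queued as
+ *
+ *	struct tg_color black = {0};
+ *
+ *	hwss_add_opp_set_disp_pattern_generator(&seq_state, opp,
+ *			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ *			CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_888,
+ *			black, true, width, height, 0);
+ *
+ * The color argument is only consulted when use_solid_color is true.
+ */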
+
+/*
+ * Helper function to add MPC update blending to block sequence
+ */
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC insert plane to block sequence
+ */
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC assert idle MPCC to block sequence
+ */
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP set blank to block sequence
+ */
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC;
+ seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_INIT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP;
+ seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count;
+ (*seq_state->num_steps)++;
+ }
+}
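+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a hardware
+ * sequencer routine collects steps with the helpers above and then runs
+ * them in one pass, roughly:
+ *
+ *	struct block_sequence steps[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
+ *	unsigned int num_steps = 0;
+ *	struct block_sequence_state seq_state = {
+ *		.steps = steps,
+ *		.num_steps = &num_steps,
+ *	};
+ *
+ *	hwss_add_hubp_set_blank(&seq_state, pipe_ctx->plane_res.hubp, true);
+ *	hwss_add_dpp_dppclk_control(&seq_state, pipe_ctx->plane_res.dpp, false, false);
+ *	hwss_execute_sequence(dc, steps, num_steps);
+ *
+ * The member names of struct block_sequence_state are inferred from how the
+ * helpers dereference it, and hwss_execute_sequence() is assumed from the
+ * existing hwss code. Note that every helper silently drops steps once
+ * MAX_HWSS_BLOCK_SEQUENCE_SIZE entries are queued, so callers must size
+ * their sequences accordingly.
+ */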
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index a180f68f711c..deb23d20bca6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ enum engine_id encs_assigned[MAX_LINK_ENCODERS];
int i;
- for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ for (i = 0; i < MAX_LINK_ENCODERS; i++)
encs_assigned[i] = ENGINE_ID_UNKNOWN;
/* Add assigned encoders to list. */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index bc5dedf5f60c..848c267ef11e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -95,10 +95,44 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
+static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx,
+ int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe)
+{
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+
+ if (current_snapshot->line_count >= MAX_PIPES)
+ return;
+
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst;
+
+ current_snapshot->line_count++;
+}
+
+static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state)
+{
+ struct pipe_topology_snapshot *current_snapshot;
+
+ /* Move to the next snapshot slot (circular buffer) */
+ dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
+
+ /* Clear the new snapshot */
+ current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+ memset(current_snapshot, 0, sizeof(*current_snapshot));
+
+ /* Set metadata */
+ current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx);
+ current_snapshot->stream_count = state->stream_count;
+ current_snapshot->phantom_stream_count = state->phantom_stream_count;
+}
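+
+/*
+ * Illustrative only: the snapshots form a ring, so a consumer that wants
+ * them oldest-to-newest starts one slot past the current index and wraps:
+ *
+ *	struct pipe_topology_history *hist = &dc->debug_data.topology_history;
+ *
+ *	for (i = 1; i <= MAX_TOPOLOGY_SNAPSHOTS; i++) {
+ *		int idx = (hist->current_snapshot_index + i) % MAX_TOPOLOGY_SNAPSHOTS;
+ *
+ *		if (hist->snapshots[idx].line_count)
+ *			consume(&hist->snapshots[idx]);
+ *	}
+ *
+ * consume() is a placeholder; never-written slots are skipped via the
+ * line_count check, assuming dc_debug_data starts out zeroed.
+ */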
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -446,6 +480,14 @@ bool resource_construct(
DC_ERR("DC: failed to create stream_encoder!\n");
pool->stream_enc_count++;
}
+
+ for (i = 0; i < caps->num_analog_stream_encoder; i++) {
+ pool->stream_enc[caps->num_stream_encoder + i] =
+ create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx);
+ if (pool->stream_enc[caps->num_stream_encoder + i] == NULL)
+ DC_ERR("DC: failed to create analog stream_encoder %d!\n", i);
+ pool->stream_enc_count++;
+ }
}
pool->hpo_dp_stream_enc_count = 0;
@@ -2303,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx *otg_master_a,
static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
int stream_idx, int slice_idx, int plane_idx, int slice_count,
- bool is_primary)
+ bool is_primary, bool is_phantom_pipe)
{
DC_LOGGER_INIT(dc->ctx->logger);
+ /* Mirror each logged topology line into the capture buffer; plane_idx 0xF encodes "no plane" for OTG master / OPP head pipes without a plane. */
if (slice_idx == 0 && plane_idx == 0 && is_primary) {
/* case 0 (OTG master pipe with plane) */
DC_LOG_DC(" | plane%d slice%d stream%d|",
@@ -2315,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == 0 && plane_idx == -1) {
/* case 1 (OTG master pipe without plane) */
DC_LOG_DC(" | slice%d stream%d|",
@@ -2323,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->stream_res.opp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == 0 && is_primary) {
/* case 2 (OPP head pipe with plane) */
DC_LOG_DC(" | plane%d slice%d | |",
@@ -2330,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
DC_LOG_DC(" |DPP%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == -1) {
/* case 3 (OPP head pipe without plane) */
DC_LOG_DC(" | slice%d | |", slice_idx);
DC_LOG_DC(" |DPG%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == slice_count - 1) {
/* case 4 (DPP pipe in last slice) */
DC_LOG_DC(" | plane%d | |", plane_idx);
DC_LOG_DC(" |DPP%d----| |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else {
/* case 5 (DPP pipe not in last slice) */
DC_LOG_DC(" | plane%d | | |", plane_idx);
DC_LOG_DC(" |DPP%d----| | |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
}
}
static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
- struct pipe_ctx *otg_master, int stream_idx)
+ struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
@@ -2376,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
resource_log_pipe(dc, dpp_pipes[dpp_idx],
stream_idx, slice_idx,
plane_idx, slice_count,
- is_primary);
+ is_primary, is_phantom_pipe);
}
} else {
resource_log_pipe(dc, opp_heads[slice_idx],
stream_idx, slice_idx, plane_idx,
- slice_count, true);
+ slice_count, true, is_phantom_pipe);
}
}
@@ -2412,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
struct pipe_ctx *otg_master;
int stream_idx, phantom_stream_idx;
DC_LOGGER_INIT(dc->ctx->logger);
+ bool is_phantom_pipe = false;
+
+ /* Start a new snapshot for this topology update */
+ start_new_topology_snapshot(dc, state);
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
@@ -2425,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
if (state->phantom_stream_count > 0) {
+ is_phantom_pipe = true;
DC_LOG_DC(" | (phantom pipes) |");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
@@ -2440,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2690,17 +2762,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link)
}
static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
- const struct dc_link *link, const struct resource_pool *pool)
+ const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream)
{
- int i;
+ int i, j = -1;
+ int stream_enc_inst = -1;
int enc_count = pool->dig_link_enc_count;
- /* for dpia, check preferred encoder first and then the next one */
- for (i = 0; i < enc_count; i++)
- if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
- break;
+ /* Find stream encoder instance for the stream */
+ if (stream) {
+ for (i = 0; i < pool->pipe_count; i++) {
+ if ((res_ctx->pipe_ctx[i].stream == stream) &&
+ (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) {
+ stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id;
+ break;
+ }
+ }
+ }
+
+ /* Pick a free encoder by priority: the link's DPIA-preferred encoder first, then the encoder matching the stream encoder instance, then the first available one. */
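+ /*
+  * Worked example (illustrative): with ref counts {1, 0, 0},
+  * dpia_preferred_eng_id == 2 and stream_enc_inst == 1, the loop first
+  * records j = 1 at i == 1 (first free slot, which is also the stream
+  * match), then takes j = 2 and breaks at i == 2: the DPIA-preferred
+  * encoder wins whenever it is free.
+  */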
+ for (i = 0; i < enc_count; i++) {
+ if (res_ctx->dio_link_enc_ref_cnts[i] == 0) {
+ if (j == -1)
+ j = i;
+
+ if (link->dpia_preferred_eng_id == i) {
+ j = i;
+ break;
+ }
- return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
+ if (stream_enc_inst == i)
+ j = stream_enc_inst;
+ }
+ }
+ return j;
}
static inline void acquire_dio_link_enc(
@@ -2781,7 +2876,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
retain_dio_link_enc(res_ctx, enc_index);
} else {
if (stream->link->is_dig_mapping_flexible)
- enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream);
else {
int link_index = 0;
@@ -2791,7 +2886,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
* one into the acquiring link.
*/
if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
- int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
+ int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream);
if (new_enc_index >= 0)
swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
@@ -5201,7 +5296,7 @@ struct link_encoder *get_temp_dio_link_enc(
enc_index = link->eng_id;
if (enc_index < 0)
- enc_index = find_free_dio_link_enc(res_ctx, link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL);
if (enc_index >= 0)
link_enc = pool->link_encoders[enc_index];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index c61300a7cb1c..2de8ef4a58ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -35,8 +35,8 @@
#include "link_enc_cfg.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_wrapper.h"
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_wrapper.h"
+#include "dml2_0/dml2_internal_types.h"
#endif
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9ac2d41f8fca..129cd5f84983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status(
return dc_state_get_stream_status(dc->current_state, stream);
}
+const struct dc_stream_status *dc_stream_get_status_const(
+ const struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+
+ return dc_state_get_stream_status(dc->current_state, stream);
+}
+
void program_cursor_attributes(
struct dc *dc,
struct dc_stream_state *stream)
@@ -231,6 +239,7 @@ void program_cursor_attributes(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -245,9 +254,14 @@ void program_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+ }
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -255,12 +269,18 @@ void program_cursor_attributes(
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
}
if (pipe_to_program) {
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ }
}
}
@@ -366,6 +386,7 @@ void program_cursor_position(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -384,16 +405,27 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
+
if (dc->ctx->dmub_srv)
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
- if (pipe_to_program)
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program) {
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ }
}
bool dc_stream_set_cursor_position(
@@ -705,9 +737,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
uint8_t i;
bool ret = false;
- struct dc *dc = stream->ctx->dc;
- struct resource_context *res_ctx =
- &dc->current_state->res_ctx;
+ struct dc *dc;
+ struct resource_context *res_ctx;
+
+ if (!stream || !stream->ctx)
+ return false;
+
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
dc_exit_ips_for_hw_access(dc);
@@ -855,9 +892,11 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
DC_LOG_DC(
- "\tdispname: %s signal: %x\n",
+ "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n",
+ stream->signal,
stream->sink->edid_caps.display_name,
- stream->signal);
+ stream->sink->edid_caps.manufacturer_id,
+ stream->sink->edid_caps.product_id);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 98f0b6b3c213..29edfa51ea2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dmub/inc/dmub_cmd.h"
@@ -54,8 +54,16 @@ struct abm_save_restore;
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
+struct dcn_hubbub_reg_state;
+struct dcn_hubp_reg_state;
+struct dcn_dpp_reg_state;
+struct dcn_mpc_reg_state;
+struct dcn_opp_reg_state;
+struct dcn_dsc_reg_state;
+struct dcn_optc_reg_state;
+struct dcn_dccg_reg_state;
-#define DC_VER "3.2.351"
+#define DC_VER "3.2.359"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -278,6 +286,15 @@ struct dc_scl_caps {
bool sharpener_support;
};
+struct dc_check_config {
+ /**
+ * max video plane width that can be safely assumed to be always
+ * supported by single DPP pipe.
+ */
+ unsigned int max_optimizable_video_width;
+ bool enable_legacy_fast_update;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -293,11 +310,6 @@ struct dc_caps {
unsigned int max_cursor_size;
unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
- /*
- * max video plane width that can be safely assumed to be always
- * supported by single DPP pipe.
- */
- unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment;
bool dcc_const_color;
@@ -455,6 +467,18 @@ enum surface_update_type {
UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
+enum dc_lock_descriptor {
+ LOCK_DESCRIPTOR_NONE = 0x0,
+ LOCK_DESCRIPTOR_STREAM = 0x1,
+ LOCK_DESCRIPTOR_LINK = 0x2,
+ LOCK_DESCRIPTOR_GLOBAL = 0x4,
+};
+
+struct surface_update_descriptor {
+ enum surface_update_type update_type;
+ enum dc_lock_descriptor lock_descriptor;
+};
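+
+/*
+ * Illustrative only: dc_lock_descriptor values are bit flags, so an update
+ * that must hold more than one lock can be described by OR-ing them:
+ *
+ *	struct surface_update_descriptor desc = {
+ *		.update_type = UPDATE_TYPE_FULL,
+ *		.lock_descriptor = LOCK_DESCRIPTOR_STREAM | LOCK_DESCRIPTOR_LINK,
+ *	};
+ */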
+
/* Forward declaration*/
struct dc;
struct dc_plane_state;
@@ -530,6 +554,7 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ bool enable_cursor_offload;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
};
@@ -849,8 +874,7 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
- uint32_t enable_bw_allocation_mode:1; /* bit 7 */
- uint32_t reserved:24;
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -875,6 +899,7 @@ struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
+ struct pipe_topology_history topology_history;
};
struct dc_phy_addr_space_config {
@@ -1120,7 +1145,6 @@ struct dc_debug_options {
uint32_t fpo_vactive_min_active_margin_us;
uint32_t fpo_vactive_max_blank_us;
bool enable_hpo_pg_support;
- bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
@@ -1152,7 +1176,6 @@ struct dc_debug_options {
bool enable_ips_visual_confirm;
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
- bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
@@ -1164,6 +1187,7 @@ struct dc_debug_options {
unsigned int auxless_alpm_lfps_t1t2_us;
short auxless_alpm_lfps_t1t2_offset_us;
bool disable_stutter_for_wm_program;
+ bool enable_block_sequence_programming;
};
@@ -1702,6 +1726,7 @@ struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
struct dc_caps caps;
+ struct dc_check_config check_config;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_bounding_box_overrides bb_overrides;
@@ -1830,20 +1855,26 @@ struct dc_surface_update {
};
struct dc_underflow_debug_data {
- uint32_t otg_inst;
- uint32_t otg_underflow;
- uint32_t h_position;
- uint32_t v_position;
- uint32_t otg_frame_count;
- struct dc_underflow_per_hubp_debug_data {
- uint32_t hubp_underflow;
- uint32_t hubp_in_blank;
- uint32_t hubp_readline;
- uint32_t det_config_error;
- } hubps[MAX_PIPES];
- uint32_t curr_det_sizes[MAX_PIPES];
- uint32_t target_det_sizes[MAX_PIPES];
- uint32_t compbuf_config_error;
+ struct dcn_hubbub_reg_state *hubbub_reg_state;
+ struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES];
+ struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES];
+ struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES];
+ struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES];
+ struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES];
+ struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES];
+ struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES];
+};
+
+struct power_features {
+ bool ips;
+ bool rcg;
+ bool replay;
+ bool dds;
+ bool sprs;
+ bool psr;
+ bool fams;
+ bool mpo;
+ bool uclk_p_state;
};
/*
@@ -2688,6 +2719,13 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
+/*
+ * smart power OLED Interfaces
+ */
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline);
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL);
+
/* Get dc link index from dpia port index */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index);
@@ -2721,6 +2759,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+void dc_log_preos_dmcub_info(const struct dc *dc);
+
/* DSC Interfaces */
#include "dc_dsc.h"
@@ -2736,7 +2776,7 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
bool dc_is_cursor_limit_pending(struct dc *dc);
-bool dc_can_clear_cursor_limit(struct dc *dc);
+bool dc_can_clear_cursor_limit(const struct dc *dc);
/**
* dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
@@ -2751,4 +2791,493 @@ bool dc_can_clear_cursor_limit(struct dc *dc);
*/
void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
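+
+/*
+ * Illustrative only: with the register-state members of
+ * dc_underflow_debug_data now being pointers, a caller is expected to
+ * provide backing storage before the query, e.g.
+ *
+ *	struct dc_underflow_debug_data data = {0};
+ *
+ *	data.hubbub_reg_state = &my_hubbub_state;
+ *	dc_get_underflow_debug_data_for_otg(dc, otg_inst, &data);
+ *
+ * Whether unset pointers are skipped or filled in internally is not
+ * visible in this hunk; my_hubbub_state is a placeholder object.
+ */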
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data);
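+
+/*
+ * Illustrative only: a debug consumer would query the per-OTG power
+ * feature state like so:
+ *
+ *	struct power_features pf = {0};
+ *
+ *	dc_get_power_feature_status(dc, otg_inst, &pf);
+ *	if (pf.psr || pf.replay)
+ *		... panel-side power saving is active ...
+ */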
+
+/**
+ * Software state variables used to program register fields across the display pipeline
+ */
+struct dc_register_software_state {
+ /* HUBP register programming variables for each pipe */
+ struct {
+ bool valid_plane_state;
+ bool valid_stream;
+ bool min_dc_gfx_version9;
+ uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */
+ uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */
+ uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */
+ uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */
+ uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */
+ uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */
+ uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */
+ uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */
+ uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */
+ uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */
+ uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */
+ uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */
+ uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */
+ uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */
+ uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */
+
+ /* Additional DCC configuration */
+ uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */
+ uint32_t surface_dcc_ind_128b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */
+
+ /* Surface pitch configuration */
+ uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */
+ uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */
+ uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */
+ uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */
+
+ /* Surface addresses */
+ uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */
+ uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */
+ uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */
+ uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */
+
+ /* TMZ configuration */
+ uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */
+ uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */
+
+ /* Tiling configuration */
+ uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */
+ uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */
+ uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */
+ uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */
+ uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */
+ uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */
+ uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */
+
+ /* DML Request Size Configuration - Luma */
+ uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */
+ uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */
+ uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */
+ uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */
+ uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */
+ uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */
+
+ /* DML Request Size Configuration - Chroma */
+ uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */
+ uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */
+ uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */
+ uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */
+ uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */
+ uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */
+
+ /* DML Expansion Modes */
+ uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */
+ uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */
+ uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */
+ uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */
+
+ /* DML DLG parameters - nominal */
+ uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */
+ uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */
+ uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */
+ uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */
+
+ /* DML prefetch settings */
+ uint32_t dst_y_prefetch; /* PREFETCH_SETTINGS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */
+ uint32_t vratio_prefetch; /* PREFETCH_SETTINGS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */
+ uint32_t vratio_prefetch_c; /* PREFETCH_SETTINGS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */
+
+ /* TTU parameters */
+ uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */
+ uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */
+ uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */
+ uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */
+ } hubp[MAX_PIPES];
+
+ /* HUBBUB register programming variables */
+ struct {
+ /* Individual DET buffer control per pipe - software state that programs DET registers */
+ uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */
+ uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */
+ uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */
+ uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */
+
+ /* Compression buffer control - software state that programs COMPBUF registers */
+ uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */
+ uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */
+ uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */
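+
+ /*
+ * Illustrative arithmetic for the two reserved-space fields above,
+ * assuming the common pixel_chunk_size of 8 KB (8192 bytes) used by
+ * recent DCN hubbub code:
+ *   compbuf_reserved_space_64b = 8192 / 32  = 256 (64-byte units)
+ *   compbuf_reserved_space_zs  = 8192 / 128 = 64
+ * The actual chunk size is per-ASIC, so these values are examples only.
+ */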
+ } hubbub;
+
+ /* DPP register programming variables for each pipe (simplified for available fields) */
+ struct {
+ uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from dppclk_enable */
+
+ /* Recout (Rectangle of Interest) configuration */
+ uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */
+ uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */
+ uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */
+ uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */
+
+ /* MPC (Multiple Pipe/Plane Combiner) size configuration */
+ uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */
+ uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */
+
+ /* DSCL mode configuration */
+ uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */
+
+ /* Scaler ratios (simplified to integer parts) */
+ uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */
+ uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */
+
+ /* Basic scaler taps */
+ uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */
+ uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */
+ } dpp[MAX_PIPES];
+
+ /* DCCG register programming variables */
+ struct {
+ /* Core Display Clock Control */
+ uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */
+ uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */
+
+ /* DPP Clock Control - 5 fields per pipe */
+ uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */
+ uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */
+ uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */
+ uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk */
+ uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */
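+
+ /*
+ * Illustrative DTO arithmetic (see dccg31_update_dpp_dto()): with the
+ * modulo fixed at 0xff, phase = 0xff * req_dppclk / ref_dppclk, so
+ * requesting half the reference clock yields a phase of about 0x7f.
+ */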
+
+ /* DSC Clock Control - 4 fields per DSC resource */
+ uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */
+ uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */
+ uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */
+ uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */
+
+ /* Pixel Clock Control - per pipe */
+ uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */
+ uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */
+ uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */
+ uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */
+ uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */
+ uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from dccg31_otg_add_pixel() */
+ uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */
+
+ /* DTBCLK DTO Control - 4 DTOs */
+ uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */
+ uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */
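+
+ /*
+ * The DTB DTO output rate is roughly ref_dtbclk * phase / modulo, so
+ * programming phase from pixclk_khz against a modulo derived from
+ * ref_dtbclk_khz (as dccg31_set_dtbclk_dto() does) reproduces the
+ * stream pixel rate.
+ */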
+ uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */
+
+ /* DP Stream Clock Control - 4 pipes */
+ uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */
+ uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */
+ uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */
+ uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */
+
+ /* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */
+ uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */
+ uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */
+ uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */
+
+ /* SYMCLK32 SE Control - 4 instances */
+ uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */
+ uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */
+ uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */
+
+ /* SYMCLK32 LE Control - 2 instances */
+ uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */
+ uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */
+ uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */
+
+ /* DPIA Clock Control */
+ uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */
+ uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */
+ uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */
+ uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */
+ uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */
+ uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */
+
+ /* Clock Gating Control */
+ uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */
+ uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+ uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+
+ /* VSync Control */
+ uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */
+ uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */
+ uint32_t vsync_otg_latch_value[6]; /* DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */
+
+ /* Time Base Control */
+ uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */
+ uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */
+ } dccg;
+
+ /* DSC essential configuration for underflow analysis */
+ struct {
+ /* DSC active state - critical for bandwidth analysis */
+ uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */
+
+ /* DSC configuration affecting bandwidth and timing */
+ uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */
+ uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */
+ uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */
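+
+ /*
+ * dsc_bits_per_pixel is typically stored in 1/16-bpp units in DC, so
+ * e.g. a captured value of 128 means a target of 8 bpp and a compressed
+ * rate of roughly pixel_rate * 8 bits per pixel.
+ */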
+
+ /* OPP integration - affects pipeline flow */
+ uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */
+ uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */
+ } dsc[MAX_PIPES];
+
+ /* MPC register programming variables */
+ struct {
+ /* MPCC blending tree and mode control */
+ uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */
+ uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */
+ uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */
+ uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */
+ uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */
+ uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */
+ uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */
+ uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */
+
+ /* MPCC blending tree connections */
+ uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */
+ uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */
+
+ /* MPCC output gamma control */
+ uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */
+ uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */
+ uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */
+
+ /* MPCC pipe assignment and status */
+ uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */
+ uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */
+ uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */
+
+ /* MPC output processing */
+ uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */
+ uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */
+ } mpc;
+
+ /* OPP register programming variables for each pipe */
+ struct {
+ /* Display Pattern Generator (DPG) Control - 1 field from DPG_CONTROL register */
+ uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */
+
+ /* Format Control (FMT) - 18 fields from FMT_CONTROL register */
+ uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */
+ uint32_t fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */
+ uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */
+ uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */
+ uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */
+ uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */
+ uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */
+ uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */
+ uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */
+ uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */
+ uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */
+ uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */
+ uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */
+ uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */
+ uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */
+ uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */
+ uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */
+ uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */
+
+ /* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */
+ uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */
+
+ /* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */
+ uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */
+ uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */
+ uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */
+
+ /* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */
+ uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */
+ uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */
+ uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */
+ uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */
+ uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */
+ uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */
+
+ /* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */
+ uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */
+ uint32_t dscrm_dsc_opp_pipe_source; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */
+ uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */
+ } opp[MAX_PIPES];
+
+ /* OPTC register programming variables for each pipe */
+ struct {
+ uint32_t otg_master_inst;
+
+ /* OTG_CONTROL register - 5 fields for OTG control */
+ uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */
+ uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */
+ uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */
+ uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */
+ uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */
+
+ /* OTG Horizontal Timing - 7 fields */
+ uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */
+ uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */
+ uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable_video_pixel_width */
+ uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */
+ uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */
+ uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */
+ uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */
+ uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */
+ uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable_video_line_width */
+ uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */
+ uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */
+ uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */
+ uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */
+
+ /* OTG DRR (Dynamic Refresh Rate) Control - 9 fields */
+ uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */
+ uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */
+ uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */
+ uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */
+ uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */
+ uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */
+ uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */
+ uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */
+ uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */
+
+ /* OPTC Data Source and ODM - 6 fields */
+ uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */
+ uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */
+ uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */
+ uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */
+ uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */
+ uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */
+
+ /* OPTC Data Format and DSC - 5 fields */
+ uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */
+ uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */
+ uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */
+ uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */
+ uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */
+
+ /* OPTC Clock and Underflow Control - 5 fields */
+ uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */
+ uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */
+ uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */
+ uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */
+ uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */
+
+ /* OTG Stereo and 3D Control - 7 fields */
+ uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */
+ uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */
+ uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */
+ uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */
+ uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */
+ uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */
+ uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */
+
+ /* OTG GSL (Global Sync Lock) Control - 5 fields */
+ uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */
+ uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */
+ uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */
+ uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */
+ uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */
+
+ /* OTG DRR Advanced Control - 4 fields */
+ uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */
+ uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */
+ uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */
+ uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */
+
+ /* OTG DSC Position Control - 2 fields */
+ uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */
+ uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */
+
+ /* OTG Double Buffer Control - 2 fields */
+ uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */
+ uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */
+
+ /* OTG Vertical Interrupts - 6 fields */
+ uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */
+ uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */
+ uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */
+ uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */
+
+ /* OTG Global Sync Parameters - 6 fields */
+ uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */
+ uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */
+ uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */
+ uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */
+ uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */
+ uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */
+
+ /* OTG Manual Trigger Control - 12 fields */
+ uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */
+ uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */
+ uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */
+ uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */
+ uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */
+ uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */
+ uint32_t otg_triga_delay; /* OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */
+ uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */
+ uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */
+ uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */
+ uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */
+ uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */
+
+ /* OTG Static Screen and Update Control - 6 fields */
+ uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */
+ uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */
+ uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */
+ uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */
+ uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */
+ uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */
+
+ /* VTG Control - 3 fields */
+ uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */
+ uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */
+ uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */
+
+ /* OTG Status (Read-Only) - 11 fields */
+ uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */
+ uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */
+ uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */
+ uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */
+ uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */
+ uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */
+ uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */
+ uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */
+ uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */
+ uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */
+ uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */
+ } optc[MAX_PIPES];
+
+ /* Metadata */
+ uint32_t active_pipe_count;
+ uint32_t active_stream_count;
+ bool state_valid;
+};
+
+/**
+ * dc_capture_register_software_state() - Capture software state for register programming
+ * @dc: DC context containing current display configuration
+ * @state: Pointer to dc_register_software_state structure to populate
+ *
+ * Extracts all software state variables that are used to program hardware register
+ * fields across the display driver pipeline. This provides a complete snapshot
+ * of the software configuration that drives hardware register programming.
+ *
+ * The function traverses the DC context and extracts values from:
+ * - Stream configurations (timing, format, DSC settings)
+ * - Plane states (surface format, rotation, scaling, cursor)
+ * - Pipe contexts (resource allocation, blending, viewport)
+ * - Clock manager (display clocks, DPP clocks, pixel clocks)
+ * - Resource context (DET buffer allocation, ODM configuration)
+ *
+ * This is essential for underflow debugging as it captures the exact software
+ * state that determines how registers are programmed, allowing analysis of
+ * whether underflow is caused by incorrect register programming or timing issues.
+ *
+ * Return: true if state was successfully captured, false on error
+ */
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state);
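+
+/*
+ * Example usage (illustrative only; allocation and error handling are up
+ * to the caller, and the structure is large enough that it should not
+ * live on the stack):
+ *
+ *	struct dc_register_software_state *state;
+ *
+ *	state = kzalloc(sizeof(*state), GFP_KERNEL);
+ *	if (state && dc_capture_register_software_state(dc, state) &&
+ *	    state->state_valid)
+ *		DC_LOG_DEBUG("pipes=%u streams=%u",
+ *			     state->active_pipe_count,
+ *			     state->active_stream_count);
+ *	kfree(state);
+ */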
+
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 5fa5e2b63fb7..40d7a7d83c40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -91,9 +91,17 @@ struct dc_vbios_funcs {
struct device_id id);
/* COMMANDS */
+ enum bp_result (*select_crtc_source)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*encoder_control)(
struct dc_bios *bios,
struct bp_encoder_control *cntl);
+ enum bp_result (*dac_load_detection)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
@@ -165,6 +173,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 53a088ebddef..7b09af1cb306 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
int i = 0, k = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
- int pipe_idx = 0;
struct dc_stream_status *stream_status = NULL;
if (dc == NULL)
@@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
if (should_manage_pstate) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
break;
}
- pipe_idx++;
}
}
@@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
bool enable)
{
uint8_t cmd_pipe_index = 0;
- uint32_t i, pipe_idx;
+ uint32_t i;
uint8_t subvp_count = 0;
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
@@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (enable) {
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
@@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
}
- pipe_idx++;
}
if (subvp_count == 2) {
update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
@@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}
+void dc_dmub_srv_cursor_offload_init(struct dc *dc)
+{
+ struct dmub_rb_cmd_cursor_offload_init *init;
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
+
+ if (!dc->config.enable_cursor_offload)
+ return;
+
+ if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_v1)
+ return;
+
+ if (!dc_dmub_srv->dmub->shared_state)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ init = &cmd.cursor_offload_init;
+ init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
+ init->header.payload_bytes = sizeof(init->init_data);
+ init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
+ init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ dc_dmub_srv->cursor_offload_enabled = true;
+}
+
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable)
+{
+ struct pipe_ctx const *pipe_ctx;
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!stream)
+ return;
+
+ pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type =
+ enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+
+ cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
+ cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
+ stream->timing.pix_clk_100hz / 10));
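+ /*
+ * line_time_in_ns is h_total * 10^6 / pix_clk_khz, rounded up by one.
+ * Worked example: 1080p60 CEA timing (h_total = 2200, 148.5 MHz pixel
+ * clock, i.e. pix_clk_100hz = 1485000) gives
+ * 2200 * 1000000 / 148500 + 1 ~= 14815 ns per line.
+ */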
+
+ cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
+ stream->adjust.v_total_max :
+ stream->timing.v_total;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
+ enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!pipe || !pipe->stream || !pipe->stream_res.tg)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+ cntl->data.otg_inst = pipe->stream_res.tg->inst;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
struct dc_context *dc_ctx;
@@ -1993,6 +2084,9 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
bool result;
+ if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu)
+ return false;
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
@@ -2231,6 +2325,11 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin
return result;
}
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc)
+{
+ return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled;
+}
+
void dc_dmub_srv_release_hw(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
@@ -2248,3 +2347,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc)
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ dmub = dc_dmub_srv->dmub;
+
+ if (dmub_srv_get_preos_info(dmub)) {
+ DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
+ DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version);
+ DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options);
+ DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status);
+ DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
+ DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
+ DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base);
+ DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7ef93444ef3c..72e0a41f39f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,6 +56,7 @@ struct dc_dmub_srv {
union dmub_shared_state_ips_driver_signals driver_signals;
bool idle_allowed;
bool needs_idle_wake;
+ bool cursor_offload_enabled;
};
bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
@@ -326,9 +327,51 @@ bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t
enum ips_residency_mode ips_mode);
/**
+ * dc_dmub_srv_cursor_offload_init() - Initializes DMUB cursor offload state, if supported and enabled.
+ *
+ * @dc: pointer to DC object
+ */
+void dc_dmub_srv_cursor_offload_init(struct dc *dc);
+
+/**
+ * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream.
+ *
+ * @dc: pointer to DC object
+ * @context: the DC context to reference for pipe allocations
+ * @stream: the stream to control
+ * @enable: true to enable cursor offload, false to disable
+ */
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable);
+
+/**
+ * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe.
+ *
+ * @dc: pointer to DC object
+ * @pipe: top-most pipe for a stream.
+ */
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe);
+
+/**
+ * dc_dmub_srv_is_cursor_offload_enabled() - Checks whether cursor offload has been enabled.
+ *
+ * @dc: pointer to DC object
+ *
+ * Return: true if cursor offload is enabled, false otherwise
+ */
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc);
+
+/**
* dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
* @dc - pointer to DC object
*/
void dc_dmub_srv_release_hw(const struct dc *dc);
+
+/**
+ * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info.
+ *
+ * @dc_dmub_srv - pointer to the DC DMUB service object
+ */
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index db669ccb1d58..79e1696def63 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1157,6 +1157,16 @@ struct dprx_states {
bool cable_id_written;
};
+union dpcd_panel_replay_capability_supported {
+ struct {
+ unsigned char PANEL_REPLAY_SUPPORT :1;
+ unsigned char SELECTIVE_UPDATE_SUPPORT :1;
+ unsigned char EARLY_TRANSPORT_SUPPORT :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_8BPC = 0,
DOWN_STREAM_MAX_10BPC,
@@ -1280,6 +1290,7 @@ struct dpcd_caps {
struct edp_psr_info psr_info;
struct replay_info pr_info;
+ union dpcd_panel_replay_capability_supported pr_caps_supported;
uint16_t edp_oled_emission_rate;
union dp_receive_port0_cap receive_port0_cap;
/* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
@@ -1346,6 +1357,31 @@ union dpcd_replay_configuration {
unsigned char raw;
};
+union panel_replay_enable_and_configuration_1 {
+ struct {
+ unsigned char PANEL_REPLAY_ENABLE :1;
+ unsigned char PANEL_REPLAY_CRC_ENABLE :1;
+ unsigned char IRQ_HPD_ASSDP_MISSING :1;
+ unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1;
+ unsigned char IRQ_HPD_RFB_ERROR :1;
+ unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1;
+ unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1;
+ unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1;
+ } bits;
+ unsigned char raw;
+};
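+
+/*
+ * Illustrative use (not part of this change): the union lets callers
+ * compose the DPCD byte field-by-field and transmit the aggregate raw
+ * value, e.g.
+ *
+ *	union panel_replay_enable_and_configuration_1 cfg = { .raw = 0 };
+ *
+ *	cfg.bits.PANEL_REPLAY_ENABLE = 1;
+ *	cfg.bits.PANEL_REPLAY_CRC_ENABLE = crc_enabled;
+ *	// write cfg.raw to the sink's panel replay configuration register
+ */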
+
+union panel_replay_enable_and_configuration_2 {
+ struct {
+ unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1;
+ unsigned char RESERVED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE :4;
+ unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1;
+ } bits;
+ unsigned char raw;
+};
+
union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 55704d4457ef..37d1a79e8241 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->prefer_easf = false;
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
+ else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3)
+ spl_in->override_easf = true;
/* Translate adaptive sharpening preference */
unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 76cf9fdedab0..321cfe92d799 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -473,12 +473,11 @@ void dc_enable_stereo(
/* Triggers multi-stream synchronization. */
void dc_trigger_sync(struct dc *dc, struct dc_state *context);
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status);
+ struct dc_stream_update *stream_update);
/**
* Create a new default stream for the requested sink
@@ -492,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status(
- struct dc_stream_state *dc_stream);
+struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream);
+const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream);
/*******************************************************************************
* Cursor interfaces - To manages the cursor within a stream
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b5aa03a3e39c..f46039f64203 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -185,6 +185,10 @@ struct dc_panel_patch {
unsigned int wait_after_dpcd_poweroff_ms;
};
+/**
+ * struct dc_edid_caps - Capabilities read from EDID.
+ * @analog: Whether the monitor is analog. Used by DVI-I handling.
+ */
struct dc_edid_caps {
/* sink identification */
uint16_t manufacturer_id;
@@ -212,6 +216,8 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
bool rr_capable;
+ bool scdc_present;
+ bool analog;
struct dc_panel_patch panel_patch;
};
@@ -347,7 +353,8 @@ enum dc_connection_type {
dc_connection_none,
dc_connection_single,
dc_connection_mst_branch,
- dc_connection_sst_branch
+ dc_connection_sst_branch,
+ dc_connection_dac_load
};
struct dc_csc_adjustments {
@@ -934,6 +941,12 @@ enum dc_psr_version {
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
+enum dc_replay_version {
+ DC_FREESYNC_REPLAY = 0,
+ DC_VESA_PANEL_REPLAY = 1,
+ DC_REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
@@ -1086,6 +1099,7 @@ enum replay_FW_Message_type {
Replay_Set_Residency_Frameupdate_Timer,
Replay_Set_Pseudo_VTotal,
Replay_Disabled_Adaptive_Sync_SDP,
+ Replay_Set_Version,
Replay_Set_General_Cmd,
};
@@ -1121,6 +1135,8 @@ union replay_low_refresh_rate_enable_options {
};
struct replay_config {
+ /* Replay version */
+ enum dc_replay_version replay_version;
/* Replay feature is supported */
bool replay_supported;
/* Replay caps support DPCD & EDID caps*/
@@ -1177,6 +1193,10 @@ struct replay_settings {
uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Defer Update Coasting vtotal table */
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Skip frame number table */
+ uint32_t frame_skip_number_table[PR_COASTING_TYPE_NUM];
+ /* Defer skip frame number table */
+ uint32_t defer_frame_skip_number_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
/* Replay pseudo vtotal for low refresh rate*/
@@ -1185,6 +1205,8 @@ struct replay_settings {
uint16_t last_pseudo_vtotal;
/* Replay desync error */
uint32_t replay_desync_error_fail_count;
+ /* The frame skip number DAL sends to DMUB */
+ uint16_t frame_skip_number;
};
/* To split out "global" and "per-panel" config settings.
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
index 5999b2da3a01..33d8bd91cb01 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
@@ -148,7 +148,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index a9b88f5e0c04..8bdffd9ff31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -425,7 +425,69 @@ struct dccg_mask {
uint32_t SYMCLKD_CLOCK_ENABLE; \
uint32_t SYMCLKE_CLOCK_ENABLE; \
uint32_t DP_DTO_MODULO[MAX_PIPES]; \
- uint32_t DP_DTO_PHASE[MAX_PIPES]
+ uint32_t DP_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \
+ uint32_t DCCG_AUDIO_DTO0_MODULE; \
+ uint32_t DCCG_AUDIO_DTO0_PHASE; \
+ uint32_t DCCG_AUDIO_DTO1_MODULE; \
+ uint32_t DCCG_AUDIO_DTO1_PHASE; \
+ uint32_t DCCG_CAC_STATUS; \
+ uint32_t DCCG_CAC_STATUS2; \
+ uint32_t DCCG_DISP_CNTL_REG; \
+ uint32_t DCCG_DS_CNTL; \
+ uint32_t DCCG_DS_DTO_INCR; \
+ uint32_t DCCG_DS_DTO_MODULO; \
+ uint32_t DCCG_DS_HW_CAL_INTERVAL; \
+ uint32_t DCCG_GTC_CNTL; \
+ uint32_t DCCG_GTC_CURRENT; \
+ uint32_t DCCG_GTC_DTO_INCR; \
+ uint32_t DCCG_GTC_DTO_MODULO; \
+ uint32_t DCCG_PERFMON_CNTL; \
+ uint32_t DCCG_PERFMON_CNTL2; \
+ uint32_t DCCG_SOFT_RESET; \
+ uint32_t DCCG_TEST_CLK_SEL; \
+ uint32_t DCCG_VSYNC_CNT_CTRL; \
+ uint32_t DCCG_VSYNC_CNT_INT_CTRL; \
+ uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \
+ uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DP_DTO_DBUF_EN; \
+ uint32_t DPIACLK_540M_DTO_MODULO; \
+ uint32_t DPIACLK_540M_DTO_PHASE; \
+ uint32_t DPIACLK_810M_DTO_MODULO; \
+ uint32_t DPIACLK_810M_DTO_PHASE; \
+ uint32_t DPIACLK_DTO_CNTL; \
+ uint32_t DPIASYMCLK_CNTL; \
+ uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CNTL; \
+ uint32_t DTBCLK_DTO_DBUF_EN; \
+ uint32_t FORCE_SYMCLK_DISABLE; \
+ uint32_t HDMICHARCLK0_CLOCK_CNTL; \
+ uint32_t MICROSECOND_TIME_BASE_DIV; \
+ uint32_t MILLISECOND_TIME_BASE_DIV; \
+ uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG0_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PIXEL_RATE_CNTL; \
+ uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \
+ uint32_t REFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_PSP_CNTL
+
struct dccg_registers {
DCCG_REG_VARIABLE_LIST;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
index 8664f0c4c9b7..97df04b7e39d 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
@@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL);
+ dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO);
+ dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE);
+ dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE);
+ dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE);
+ dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE);
+ dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE);
+ dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE);
+ dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS);
+ dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2);
+ dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG);
+ dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL);
+ dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR);
+ dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO);
+ dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL);
+ dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL);
+ dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2);
+ dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3);
+ dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4);
+ dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5);
+ dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6);
+ dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL);
+ dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL);
+ dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT);
+ dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR);
+ dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO);
+ dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL);
+ dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2);
+ dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET);
+ dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL);
+ dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL);
+ dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL);
+ dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE);
+ dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL);
+ dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN);
+ dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]);
+ dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]);
+ dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]);
+ dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]);
+ dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]);
+ dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]);
+ dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]);
+ dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]);
+ dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL);
+ dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL);
+ dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL);
+ dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL);
+ dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]);
+ dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]);
+ dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]);
+ dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]);
+ dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL);
+ dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL);
+ dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL);
+ dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM);
+ dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM);
+ dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM);
+ dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM);
+ dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN);
+ dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]);
+ dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]);
+ dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]);
+ dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]);
+ dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]);
+ dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]);
+ dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]);
+ dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]);
+ dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL);
+ dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE);
+ dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL);
+ dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL);
+ dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM);
+ dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV);
+ dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV);
+ dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV);
+ dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL);
+ dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL);
+ dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL);
+ dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL);
+ dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE);
+ dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE);
+ dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE);
+ dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE);
+ dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE);
+}
+
static const struct dccg_funcs dccg31_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
@@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = {
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index cd261051dc2c..bf659920d4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst);
void dccg31_enable_dscclk(struct dccg *dccg, int inst);
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
+
#endif //__DCN31_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
index 8f6edd8e9beb..ef3db6beba25 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
@@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = {
.get_pixel_rate_div = dccg314_get_pixel_rate_div,
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
- .set_dtbclk_p_src = dccg314_set_dtbclk_p_src
+ .set_dtbclk_p_src = dccg314_set_dtbclk_p_src,
+	.dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg314_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index 60ea1d248deb..a609635f35db 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -74,8 +74,7 @@
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
+ SR(DTBCLK_P_CNTL)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index de6d62401362..bd2f528137b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -1114,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
if (dispclk_rdivider_value != 0)
REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
+static void dccg35_wait_for_dentist_change_done(struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+
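+	/* Writing the current value back appears to latch a pending divider change; then poll CHG_DONE every 50 us, up to 2000 tries. */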
+ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+}
static void dcn35_set_dppclk_enable(struct dccg *dccg,
uint32_t dpp_inst, uint32_t enable)
@@ -1174,9 +1184,9 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
} else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
- /*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
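+	/* wait for clock to fully ramp */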
+ udelay(10);
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1300,6 +1310,8 @@ static void dccg35_set_pixel_rate_div(
BREAK_TO_DEBUGGER();
return;
}
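+	/* Presumably only OTG instances 0-3 are fed from the DENTIST divider, so only wait for those. */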
+ if (otg_inst < 4)
+ dccg35_wait_for_dentist_change_done(dccg);
}
static void dccg35_set_dtbclk_p_src(
@@ -1411,7 +1423,7 @@ static void dccg35_set_dtbclk_dto(
__func__, params->otg_inst, params->pixclk_khz,
params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
- } else {
+ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -1664,7 +1676,7 @@ static void dccg35_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (dccg->dpp_clock_gated[dpp_inst] == clock_on)
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
@@ -1682,9 +1694,12 @@ static void dccg35_dpp_root_clock_control(
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
/*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ // wait for clock to fully ramp
+ udelay(10);
+
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
@@ -2438,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = {
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
.dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 51f98c5c51c4..7b9c36456cd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -41,8 +41,9 @@
SR(SYMCLKA_CLOCK_ENABLE),\
SR(SYMCLKB_CLOCK_ENABLE),\
SR(SYMCLKC_CLOCK_ENABLE),\
SR(SYMCLKD_CLOCK_ENABLE),\
- SR(SYMCLKE_CLOCK_ENABLE)
+ SR(SYMCLKE_CLOCK_ENABLE),\
+ SR(SYMCLK_PSP_CNTL)
#define DCCG_MASK_SH_LIST_DCN35(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -231,6 +232,14 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 0b8ed9b94d3c..663a18ee5162 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = {
.enable_symclk_se = dccg401_enable_symclk_se,
.disable_symclk_se = dccg401_disable_symclk_se,
.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
+	.dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg401_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index a6006776333d..2dcf394edf22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
if (abm_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index eeed840073fe..fcad61c618a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup(
REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
- REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO)
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a8e79104b684..5f8fba45d98d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 0c50fe266c8a..87dbb8d7ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -302,6 +302,10 @@ static void setup_panel_mode(
if (ctx->dc->caps.psp_setup_panel_mode)
return;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output(
return true;
}
+static bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* When the VBIOS doesn't specify any limits, use 400 MHz.
+ * The value comes from amdgpu_atombios_get_clock_info.
+ */
+ uint32_t max_pixel_clock_khz = 400000;
+
+ if (enc110->base.ctx->dc_bios->fw_info_valid &&
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) {
+ max_pixel_clock_khz =
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock;
+ }
+
+ if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10)
+ return false;
+
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_888)
+ return false;
+
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ return true;
+}
+
void dce110_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -824,6 +855,7 @@ void dce110_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -847,6 +879,11 @@ void dce110_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
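+	/* DVI-I also routes analog RGB pins, so advertise RGB whenever a DAC engine is wired up. */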
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -885,6 +922,13 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+			/* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream(
is_valid = dce110_link_encoder_validate_dp_output(
enc110, &stream->timing);
break;
+ case SIGNAL_TYPE_RGB:
+ is_valid = dce110_link_encoder_validate_rgb_output(
+ enc110, &stream->timing);
+ break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
@@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init(
cntl.coherent = false;
cntl.hpd_sel = enc110->base.hpd_source;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (enc110->base.connector.id == CONNECTOR_ID_EDP)
cntl.signal = SIGNAL_TYPE_EDP;
@@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup(
/* DP MST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
break;
+ case SIGNAL_TYPE_RGB:
+ break;
default:
ASSERT_CRITICAL(false);
/* invalid mode ! */
@@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
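+	/* Shut off the analog DAC (if any) before dealing with the digital backend below. */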
+ switch (enc->analog_engine) {
+ case ENGINE_ID_DACA:
+ REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0);
+ break;
+ case ENGINE_ID_DACB:
+ /* DACB doesn't seem to be present on DCE6+,
+ * although there are references to it in the register file.
+ */
+ DC_LOG_ERROR("%s DACB is unsupported\n", __func__);
+ break;
+ default:
+ break;
+ }
+
+ /* The code below only applies to connectors that support digital signals. */
+ if (enc->transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (!dce110_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
@@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
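+	/* As in the DCE11x constructor: DVI-I also carries analog RGB, so advertise it when a DAC engine exists. */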
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+			/* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 261c70e01e33..c58b69bc319b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -101,18 +101,21 @@
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id)
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SR(DAC_ENABLE)
#endif
#define LE_DCE80_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- LE_COMMON_REG_LIST_BASE(id)
+ LE_COMMON_REG_LIST_BASE(id), \
+ SR(DAC_ENABLE)
#define LE_DCE100_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SR(DCI_MEM_PWR_STATUS)
+ SR(DCI_MEM_PWR_STATUS), \
+ SR(DAC_ENABLE)
#define LE_DCE110_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
@@ -181,6 +184,9 @@ struct dce110_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+
+ /* DAC registers */
+ uint32_t DAC_ENABLE;
};
struct dce110_link_encoder {
@@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output(
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing);
-bool dce110_link_encoder_validate_rgb_output(
- const struct dce110_link_encoder *enc110,
- const struct dc_crtc_timing *crtc_timing);
-
bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1130d7619b26..574618d5d4a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct(
enc110->se_shift = se_shift;
enc110->se_mask = se_mask;
}
+
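+/* Analog (DAC) streams presumably need no DIG stream-encoder programming, so every hook stays NULL. */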
+static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {};
+
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id)
+{
+ enc110->base.funcs = &dce110_an_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
index cc5020a8e1e1..068de1392121 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -708,6 +708,11 @@ void dce110_stream_encoder_construct(
const struct dce_stream_encoder_shift *se_shift,
const struct dce_stream_encoder_mask *se_mask);
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id);
void dce110_se_audio_mute_control(
struct stream_encoder *enc, bool mute);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d37ecfdde4f1..5bfa2b0d2afd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -61,10 +61,9 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
-bool should_use_dmub_lock(struct dc_link *link)
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link)
{
- /* ASIC doesn't support DMUB */
- if (!link->ctx->dmub_srv)
+ if (!link)
return false;
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
@@ -73,16 +72,38 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
- /* only use HW lock for PSR1 on single eDP */
if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
struct dc_link *edp_links[MAX_NUM_EDP];
int edp_num;
- dc_get_edp_links(link->dc, edp_links, &edp_num);
-
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 1)
return true;
}
+ return false;
+}
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context)
+{
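+	/* A context requires the HW lock if any of its streams' links do. */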
+ if (!context)
+ return false;
+ for (int i = 0; i < context->stream_count; i++) {
+ const struct dc_link *link = context->streams[i]->link;
+
+ if (dmub_hw_lock_mgr_does_link_require_lock(dc, link))
+ return true;
+ }
return false;
}
+
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link)
+{
+ /* ASIC doesn't support DMUB */
+ if (!dc->ctx->dmub_srv)
+ return false;
+
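+	/* DCN 4.01 and newer are assumed to handle HW locking without the inbox1 command. */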
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ return false;
+
+ return dmub_hw_lock_mgr_does_link_require_lock(dc, link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 5a72b168fb4a..4c80ca8484ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -37,6 +37,16 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
-bool should_use_dmub_lock(struct dc_link *link);
+/**
+ * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox1 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context);
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index f9542edff14b..cf1372aaff6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -213,7 +213,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
*/
static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
uint32_t coasting_vtotal,
- uint8_t panel_inst)
+ uint8_t panel_inst,
+ uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -227,6 +228,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -283,7 +285,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal)
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -301,6 +303,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -384,6 +387,19 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
break;
+ case Replay_Set_Version:
+ //Header
+ cmd.replay_set_version.header.sub_type =
+ DMUB_CMD__REPLAY_SET_VERSION;
+ cmd.replay_set_version.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_version) -
+ sizeof(struct dmub_cmd_header);
+ //Cmd Body
+ cmd.replay_set_version.replay_set_version_data.panel_inst =
+ cmd_element->version_data.panel_inst;
+ cmd.replay_set_version.replay_set_version_data.version =
+ cmd_element->version_data.version;
+ break;
case Replay_Set_General_Cmd:
//Header
cmd.replay_set_general_cmd.header.sub_type =
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..07c79739a980 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -27,11 +27,12 @@ struct dmub_replay_funcs {
void (*replay_send_cmd)(struct dmub_replay *dmub,
enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal,
- uint8_t panel_inst);
+ uint8_t panel_inst, uint16_t frame_skip_number);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode);
void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal);
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal,
+ uint16_t frame_skip_number);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
deleted file mode 100644
index 4c21ce42054c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ /dev/null
@@ -1,141 +0,0 @@
-# SPDX-License-Identifier: MIT */
-#
-# Copyright 2023 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dml2.
-
-dml2_ccflags := $(CC_FLAGS_FPU)
-dml2_rcflags := $(CC_FLAGS_NO_FPU)
-
-ifneq ($(CONFIG_FRAME_WARN),0)
- ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
- frame_warn_limit := 4096
- else
- frame_warn_limit := 3072
- endif
- else
- frame_warn_limit := 2048
- endif
-
- ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
- frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
- endif
-endif
-
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
-
-DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
- dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
- dml_display_rq_dlg_calc.o
-
-AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-
-DML21 := src/dml2_top/dml2_top_interfaces.o
-DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/dml2_core/dml2_core_dcn4.o
-DML21 += src/dml2_core/dml2_core_utils.o
-DML21 += src/dml2_core/dml2_core_factory.o
-DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
-DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
-DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
-DML21 += src/dml2_mcg/dml2_mcg_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
-DML21 += src/dml2_standalone_libraries/lib_float_math.o
-DML21 += dml21_translation_helper.o
-DML21 += dml21_wrapper.o
-DML21 += dml21_utils.o
-
-AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
new file mode 100644
index 000000000000..97e068b6bf6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+# Makefile for dml2.
+
+dml2_ccflags := $(CC_FLAGS_FPU)
+dml2_rcflags := $(CC_FLAGS_NO_FPU)
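+# DML does floating-point math in the kernel: build these objects with CC_FLAGS_FPU and strip the default CC_FLAGS_NO_FPU from them below.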
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
+
+DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
+ dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
+ dml_display_rq_dlg_calc.o
+
+AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
+
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
+DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
+DML21 += src/dml2_core/dml2_core_factory.o
+DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
+DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
+DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
+DML21 += src/dml2_pmo/dml2_pmo_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
+DML21 += src/dml2_standalone_libraries/lib_float_math.o
+DML21 += dml21_translation_helper.o
+DML21 += dml21_wrapper.o
+DML21 += dml21_utils.o
+
+AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
index e450445bc05d..b954c9648fbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
@@ -53,17 +53,17 @@ typedef const void *const_pvoid;
typedef const char *const_pchar;
typedef struct rgba_struct {
- uint8 a;
- uint8 r;
- uint8 g;
- uint8 b;
+ uint8 a;
+ uint8 r;
+ uint8 g;
+ uint8 b;
} rgba_t;
typedef struct {
- uint8 blue;
- uint8 green;
- uint8 red;
- uint8 alpha;
+ uint8 blue;
+ uint8 green;
+ uint8 red;
+ uint8 alpha;
} gen_color_t;
typedef union {
@@ -87,7 +87,7 @@ typedef union {
} uintfloat64;
#ifndef UNREFERENCED_PARAMETER
-#define UNREFERENCED_PARAMETER(x) x = x
+#define UNREFERENCED_PARAMETER(x) (x = x)
#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 4b9b2e84d381..c468f492b876 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin
return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe);
}
+
#define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \
{ \
dml_uint_t plane_idx; \
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
index a38ed89c47a9..a38ed89c47a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
index dbeb08466092..5b40dcdc4406 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
@@ -274,7 +274,6 @@ enum dml_clk_cfg_policy {
dml_use_state_freq = 2
};
-
struct soc_state_bounding_box_st {
dml_float_t socclk_mhz;
dml_float_t dscclk_mhz;
@@ -1894,7 +1893,7 @@ struct display_mode_lib_scratch_st {
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params;
};
-/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
+/// @brief Represents the overall soc/ip environment. It contains data structures representing the soc/ip characteristics and also structures that hold the calculation output
struct display_mode_lib_st {
dml_uint_t project;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
index 14d389525296..e574c81edf5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
@@ -52,7 +52,7 @@
#define __DML_VBA_DEBUG__
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported
-#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
+#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
index 89890c88fd66..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
index 113b0265e1d1..a82b49cf7fb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
@@ -30,7 +30,6 @@
#include "display_mode_core_structs.h"
#include "cmntypes.h"
-
#include "dml_assert.h"
#include "dml_logging.h"
@@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index bf5e7f4e0416..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 9880d3e0398e..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index ee721606b883..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
index 4bff52eaaef8..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 08f7f03b1023..798abb2b2e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -224,9 +224,7 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
/* Populate stream, plane mappings and other fields in display config. */
- DC_FP_START();
result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
- DC_FP_END();
if (!result)
return false;
@@ -281,9 +279,7 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
mode_support->dml2_instance = dml_init->dml2_instance;
- DC_FP_START();
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
- DC_FP_END();
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
index 15f92029d2e5..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 793e1c038efd..16a4f97bca4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_DML_DCN4_SOC_BB__
#define __DML_DML_DCN4_SOC_BB__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
index 281d7ad230d8..281d7ad230d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
index a64ec4dcf11a..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
index 91955bbe24b8..bf57df42d1d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
@@ -46,7 +46,6 @@ struct dml2_display_dlg_regs {
uint32_t dst_y_delta_drq_limit;
uint32_t refcyc_per_vm_dmdata;
uint32_t dmdata_dl_delta;
- uint32_t dst_y_svp_drq_limit;
// MRQ
uint32_t refcyc_per_meta_chunk_vblank_l;
@@ -122,6 +121,8 @@ struct dml2_display_rq_regs {
uint32_t crq_expansion_mode;
uint32_t plane1_base_address;
uint32_t unbounded_request_enabled;
+ bool pte_buffer_mode;
+ bool force_one_row_for_frame;
// MRQ
uint32_t mrq_expansion_mode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index e8dc6471c0be..35aa954248cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -49,6 +49,11 @@ enum dml2_source_format_class {
dml2_422_packed_12 = 18
};
+enum dml2_sample_positioning {
+ dml2_interstitial = 0,
+ dml2_cosited = 1
+};
+
enum dml2_rotation_angle {
dml2_rotation_0 = 0,
dml2_rotation_90 = 1,
@@ -82,6 +87,15 @@ enum dml2_output_link_dp_rate {
dml2_dp_rate_uhbr20 = 6
};
+enum dml2_pstate_type {
+ dml2_pstate_type_uclk = 0,
+ dml2_pstate_type_fclk = 1,
+ dml2_pstate_type_ppt = 2,
+ dml2_pstate_type_temp_read = 3,
+ dml2_pstate_type_dummy_pstate = 4,
+ dml2_pstate_type_count = 5
+};
+
enum dml2_uclk_pstate_change_strategy {
dml2_uclk_pstate_change_strategy_auto = 0,
dml2_uclk_pstate_change_strategy_force_vactive = 1,
@@ -222,7 +236,11 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool easf_enabled;
+ bool isharp_enabled;
bool upsp_enabled;
+ enum dml2_sample_positioning upsp_sample_positioning;
+ unsigned int upsp_vtaps;
struct {
double h_ratio;
double v_ratio;
@@ -384,7 +402,7 @@ struct dml2_plane_parameters {
// reserved_vblank_time_ns is the minimum time to reserve in vblank for Twait
// The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override.
long reserved_vblank_time_ns;
- unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
+ unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count]; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
unsigned int hostvm_min_page_size_kbytes;
@@ -413,7 +431,6 @@ struct dml2_stream_parameters {
bool disable_dynamic_odm;
bool disable_subvp;
int minimum_vblank_idle_requirement_us;
- bool minimize_active_latency_hiding;
struct {
struct {
@@ -456,6 +473,7 @@ struct dml2_display_cfg {
bool enable;
bool value;
} force_nom_det_size_kbytes;
+
bool mode_support_check_disable;
bool mcache_admissibility_check_disable;
bool surface_viewport_size_check_disable;
@@ -478,7 +496,6 @@ struct dml2_display_cfg {
bool synchronize_ddr_displays_for_uclk_pstate_change;
bool max_outstanding_when_urgent_expected_disable;
bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
- unsigned int best_effort_min_active_latency_hiding_us;
bool all_streams_blanked;
} overrides;
};
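
The hunks above turn the scalar max_vactive_det_fill_delay_us override into an array indexed by the new dml2_pstate_type enum, one entry per p-state type. A standalone sketch of the indexing pattern; the enum mirrors the diff, while struct plane_overrides is a hypothetical reduction of dml2_plane_parameters:

#include <stdio.h>

/* Mirrors the enum added in dml_top_display_cfg_types.h above. */
enum dml2_pstate_type {
	dml2_pstate_type_uclk = 0,
	dml2_pstate_type_fclk = 1,
	dml2_pstate_type_ppt = 2,
	dml2_pstate_type_temp_read = 3,
	dml2_pstate_type_dummy_pstate = 4,
	dml2_pstate_type_count = 5
};

/* Hypothetical reduced plane override: one max fill delay per type. */
struct plane_overrides {
	unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count];
};

int main(void)
{
	struct plane_overrides ov = { .max_vactive_det_fill_delay_us = {
		[dml2_pstate_type_uclk] = 100, /* 0 elsewhere = no reserved time */
	} };

	/* Callers that previously read the scalar now index by p-state type,
	 * as in calculate_excess_vactive_bandwidth_required() below. */
	unsigned int uclk_delay =
		ov.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
	printf("uclk fill delay: %u us\n", uclk_delay);
	return 0;
}
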
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
index 8f624a912e78..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
index 176f55947664..1fbc520c2540 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
@@ -89,8 +89,8 @@ struct dml2_soc_qos_parameters {
struct dml2_soc_power_management_parameters {
double dram_clk_change_blackout_us;
- double dram_clk_change_read_only_us;
- double dram_clk_change_write_only_us;
+ double dram_clk_change_read_only_us; // deprecated
+ double dram_clk_change_write_only_us; // deprecated
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
double g7_temperature_read_blackout_us;
@@ -145,6 +145,8 @@ struct dml2_soc_bb {
struct dml2_soc_vmin_clock_limits vmin_limit;
double lower_bound_bandwidth_dchub;
+ double fraction_of_urgent_bandwidth_nominal_target;
+ double fraction_of_urgent_bandwidth_flip_target;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
@@ -170,6 +172,7 @@ struct dml2_soc_bb {
struct dml2_ip_capabilities {
unsigned int pipe_count;
unsigned int otg_count;
+ unsigned int TDLUT_33cube_count;
unsigned int num_dsc;
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
@@ -188,7 +191,9 @@ struct dml2_ip_capabilities {
unsigned int subvp_prefetch_end_to_mall_start_us;
unsigned int subvp_fw_processing_delay;
unsigned int max_vactive_det_fill_delay_us;
-
+ unsigned int ppt_max_allow_delay_us;
+ unsigned int temp_read_max_allow_delay_us;
+ unsigned int dummy_pstate_max_allow_delay_us;
/* FAMS2 delays */
struct {
unsigned int max_allow_delay_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index 41adb1104d0f..452e4a2e72c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -70,6 +70,8 @@ struct dml2_pmo_options {
bool disable_dyn_odm;
bool disable_dyn_odm_for_multi_stream;
bool disable_dyn_odm_for_stream_with_svp;
+ struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES];
+ unsigned int num_override_strategies_per_list[DML2_MAX_PLANES];
};
struct dml2_options {
@@ -310,6 +312,7 @@ struct dml2_mode_support_info {
bool NumberOfOTGSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
+ bool NumberOfTDLUT33cubeSupport;
bool WritebackScaleRatioAndTapsSupport;
bool CursorSupport;
bool PitchSupport;
@@ -357,6 +360,8 @@ struct dml2_mode_support_info {
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
bool temp_read_or_ppt_support;
+ bool qos_bandwidth_support;
+ bool dcfclk_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -671,6 +676,8 @@ struct dml2_display_cfg_programming {
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
double LowestPrefetchMargin;
+
+ unsigned int pstate_recout_reduction_lines[DML2_MAX_PLANES];
} misc;
struct dml2_mode_support_info mode_support_info;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index 6ee37386f672..eba948e187c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.writeback_interface_buffer_size_kbytes = 90,
//Number of pipes after DCN Pipe harvesting
.max_num_dpp = 4,
+ .max_num_opp = 4,
.max_num_otg = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index a68bb001a346..a68bb001a346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index bf62d42b3f78..a02e9fd6b5ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1303,6 +1303,7 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 16;
} else {
+
if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 24;
@@ -1320,6 +1321,7 @@ static double TruncToValidBPP(
MaxDSCBPP = 16;
}
}
+
if (Output == dml2_dp2p0) {
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
} else if (DSCEnable && Output == dml2_dp) {
@@ -4047,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
bool UseDSC,
unsigned int NumberOfDSCSlices,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double DISPCLKRequired,
unsigned int NumberOfDPPRequired,
unsigned int MaxHActiveForDSC,
@@ -4063,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
if (DISPCLKRequired > MaxDispclk)
return false;
- if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP)
+ if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP)
return false;
if (are_odm_segments_symmetrical) {
if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle))
@@ -4109,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode(
double MaxDispclk,
bool DSCEnable,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double PixelClock,
unsigned int NumberOfDSCSlices,
@@ -4179,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode(
UseDSC,
NumberOfDSCSlices,
TotalNumberOfActiveDPP,
+ TotalNumberOfActiveOPP,
MaxNumDPP,
+ MaxNumOPP,
DISPCLKRequired,
NumberOfDPPRequired,
MaxHActiveForDSC,
@@ -6964,7 +6972,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
stream_index = p->display_cfg->plane_descriptors[plane_index].stream_index;
- dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us /
+ dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us[0] /
((double)p->display_cfg->stream_descriptors[stream_index].timing.h_total /
(double)p->display_cfg->stream_descriptors[stream_index].timing.pixel_clock_khz * 1000.0));
@@ -7061,9 +7069,9 @@ static void calculate_excess_vactive_bandwidth_required(
excess_vactive_fill_bw_l[plane_index] = 0.0;
excess_vactive_fill_bw_c[plane_index] = 0.0;
- if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us > 0) {
- excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
- excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
+ if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] > 0) {
+ excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
+ excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
}
@@ -8358,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
mode_lib->ms.support.TotalAvailablePipesSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
@@ -8393,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
false, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8412,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
true, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8516,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
+ mode_lib->ms.NoOfOPP[k] = 1;
if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 4;
+ mode_lib->ms.NoOfOPP[k] = 4;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 3;
+ mode_lib->ms.NoOfOPP[k] = 3;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 2;
+ mode_lib->ms.NoOfOPP[k] = 2;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
@@ -8540,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
}
}
#if defined(__DML_VBA_DEBUG__)
@@ -8548,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#endif
}
+ mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k];
+ mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k];
+ }
if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
mode_lib->ms.support.TotalAvailablePipesSupport = false;
+ if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp)
+ mode_lib->ms.support.TotalAvailablePipesSupport = false;
mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
@@ -9028,11 +9051,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->ms.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->ms.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->ms.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -9040,8 +9063,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_excess_vactive_bandwidth_required(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->ms.excess_vactive_fill_bw_l,
mode_lib->ms.excess_vactive_fill_bw_c);
@@ -9483,8 +9506,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_vactive_det_fill_latency(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
mode_lib->ms.vactive_sw_bw_l,
@@ -9492,7 +9515,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
- mode_lib->ms.dram_change_vactive_det_fill_delay_us);
+ mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk]);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
@@ -10986,11 +11009,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->mp.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->mp.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->mp.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -10998,8 +11021,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_excess_vactive_bandwidth_required(
display_cfg,
s->num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->mp.excess_vactive_fill_bw_l,
mode_lib->mp.excess_vactive_fill_bw_c);
@@ -12756,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
- const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index];
struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
@@ -12771,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* from display configuration */
base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
base_programming->otg_vline_time_ns =
- (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0);
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
- stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines);
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal;
/* from core */
base_programming->config.bits.min_ttu_vblank_usable = true;
@@ -12807,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vactive */
base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_vblank:
@@ -12819,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vblank */
base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_fw_drr:
/* drr */
base_programming->type = FAMS2_STREAM_TYPE_DRR;
sub_programming->drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines;
sub_programming->drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
sub_programming->drr.only_stretch_if_required = true;
@@ -12847,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
(uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
sub_programming->subvp.vratio_denominator = 1000;
sub_programming->subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines;
sub_programming->subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
sub_programming->subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal;
sub_programming->subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive;
sub_programming->subvp.config.bits.is_multi_planar =
plane_descriptor->surface.plane1.height > 0;
sub_programming->subvp.config.bits.is_yuv420 =
@@ -12862,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
plane_descriptor->pixel_format == dml2_420_12;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_reserved_hw:
@@ -12920,7 +12943,8 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx];
- out->dram_change_vactive_det_fill_delay_us = (unsigned int)math_ceil(mode_lib->ms.dram_change_vactive_det_fill_delay_us[plane_idx]);
+ out->vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_ceil(mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk][plane_idx]);
}
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index)
@@ -13001,7 +13025,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
- out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.global_temp_read_or_ppt_supported;
out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
@@ -13027,7 +13051,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
+ out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport;
out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
+ out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support;
+ out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support;
for (k = 0; k < out->display_config.num_planes; k++) {
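
The ValidateODMMode()/CalculateODMMode() changes earlier in this file add an OPP budget alongside the existing DPP budget: each ODM segment consumes one DPP and one OPP, so a candidate configuration must fit both limits. Reduced to a standalone predicate (function name and parameters here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative reduction of the extended check in ValidateODMMode():
 * a candidate ODM configuration fits only if the segments it needs
 * stay within both the DPP and the OPP budgets.
 */
static bool pipes_fit(unsigned int active_dpp, unsigned int active_opp,
		      unsigned int needed, unsigned int max_dpp,
		      unsigned int max_opp)
{
	return (active_dpp + needed) <= max_dpp &&
	       (active_opp + needed) <= max_opp;
}

int main(void)
{
	/* 4 DPPs/OPPs total (matching max_num_dpp/max_num_opp = 4 in the
	 * dcn4 caps above); 2 already active, a 2:1 combine needs 2 more. */
	printf("%d\n", pipes_fit(2, 2, 2, 4, 4)); /* prints 1: fits */
	printf("%d\n", pipes_fit(3, 3, 2, 4, 4)); /* prints 0: exceeds */
	return 0;
}
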
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index 27ef0e096b25..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index 640087e862f8..cc4f0663c6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -15,6 +15,8 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
memset(out, 0, sizeof(struct dml2_core_instance));
+ out->project_id = project_id;
+
switch (project_id) {
case dml2_project_dcn4x_stage1:
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
index 411c514fe65c..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
index ffb8c09f37a5..1087a8c926ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -36,7 +36,9 @@ struct dml2_core_ip_params {
unsigned int max_line_buffer_lines;
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int max_num_dpp;
+ unsigned int max_num_opp;
unsigned int max_num_otg;
+ unsigned int TDLUT_33cube_count;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
@@ -46,6 +48,7 @@ struct dml2_core_ip_params {
double max_vscl_ratio;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
+ unsigned int odm_combine_support_mask;
unsigned int num_dsc;
unsigned int maximum_dsc_bits_per_component;
unsigned int maximum_pixels_per_line_per_dsc_unit;
@@ -82,7 +85,6 @@ struct dml2_core_ip_params {
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
-
// MRQ
bool dcn_mrq_present;
unsigned int zero_size_buffer_entries;
@@ -103,6 +105,8 @@ struct dml2_core_internal_DmlPipe {
unsigned int DPPPerSurface;
bool ScalerEnabled;
bool UPSPEnabled;
+ unsigned int UPSPVTaps;
+ enum dml2_sample_positioning UPSPSamplePositioning;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -230,6 +234,7 @@ struct dml2_core_internal_mode_support_info {
bool MSOOrODMSplitWithNonDPLink;
bool NotEnoughLanesForMSO;
bool NumberOfOTGSupport;
+ bool NumberOfTDLUT33cubeSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
bool WritebackScaleRatioAndTapsSupport;
@@ -259,8 +264,11 @@ struct dml2_core_internal_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
+ bool global_dram_clock_change_support_required;
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
bool USRRetrainingSupport;
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
@@ -331,7 +339,6 @@ struct dml2_core_internal_mode_support_info {
bool incorrect_imall_usage;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
bool dcfclk_support;
@@ -566,6 +573,7 @@ struct dml2_core_internal_mode_support {
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES];
unsigned int NoOfDPP[DML2_MAX_PLANES];
+ unsigned int NoOfOPP[DML2_MAX_PLANES];
bool MPCCombine[DML2_MAX_PLANES];
double dcfclk_deepsleep;
double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES];
@@ -576,6 +584,7 @@ struct dml2_core_internal_mode_support {
bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES];
bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES];
unsigned int TotalNumberOfActiveDPP;
+ unsigned int TotalNumberOfActiveOPP;
unsigned int TotalNumberOfSingleDPPSurfaces;
unsigned int TotalNumberOfDCCActiveDPP;
unsigned int Total3dlutActive;
@@ -584,7 +593,7 @@ struct dml2_core_internal_mode_support {
double VActiveLatencyHidingMargin[DML2_MAX_PLANES];
double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int MaxVStartupLines[DML2_MAX_PLANES];
- double dram_change_vactive_det_fill_delay_us[DML2_MAX_PLANES];
+ double pstate_vactive_det_fill_delay_us[dml2_pstate_type_count][DML2_MAX_PLANES];
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
@@ -614,8 +623,8 @@ struct dml2_core_internal_mode_support {
unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
@@ -639,7 +648,7 @@ struct dml2_core_internal_mode_support {
unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
- enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
};
/// @brief A mega structure that houses various info for model programming step.
@@ -830,6 +839,7 @@ struct dml2_core_internal_mode_program {
double max_urgent_latency_us;
double df_response_time_us;
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
// -------------------
// Output
// -------------------
@@ -956,11 +966,12 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
@@ -1127,8 +1138,8 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int cursor_bytes[DML2_MAX_PLANES];
bool stream_visited[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1219,8 +1230,8 @@ struct dml2_core_calcs_mode_programming_locals {
double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
unsigned int per_pipe_flip_bytes[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1306,7 +1317,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
- enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1733,10 +1744,12 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
unsigned int *meta_row_height_c;
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
struct dml2_core_internal_watermarks *Watermark;
enum dml2_pstate_change_support *DRAMClockChangeSupport;
+ bool *global_dram_clock_change_support_required;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
@@ -1747,10 +1760,10 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
- bool *temp_read_or_ppt_support;
+ enum dml2_pstate_change_support *temp_read_or_ppt_support;
+ bool *global_temp_read_or_ppt_supported;
};
-
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
const struct dml2_display_cfg *display_cfg;
unsigned int ConfigReturnBufferSizeInKByte;
@@ -2240,7 +2253,7 @@ struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params
unsigned int *swath_width_c;
unsigned int *swath_height_l;
unsigned int *swath_height_c;
- double latency_to_hide_us;
+ double latency_to_hide_us[DML2_MAX_PLANES];
/* outputs */
unsigned int *bytes_required_l;
@@ -2308,6 +2321,7 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
+ enum dml2_project_id project_id;
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2320,6 +2334,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
+ enum dml2_project_id project_id;
struct dml2_display_cfg_programming *programming;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index 5f301befed16..b57d0f6ea6a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
@@ -306,6 +306,8 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod
DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || (support->global_dram_clock_change_supported == 0 && support->global_dram_clock_change_support_required))
+ DML_LOG_VERBOSE("DML: support: dram_clock_change_support = %d\n", support->global_dram_clock_change_supported);
if (!fail_only || support->ImmediateFlipSupport == 0)
DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 22969a533a7b..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index e7b58f2efda4..e7b58f2efda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index dfd01440737d..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 20ba2e446f1d..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index a265f254152c..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 02da6f45cbf7..f54fde8fba90 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index c60b8fe90819..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
index ad307deca3b0..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index 1b9579a32ff2..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index f00bd9e72a86..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index d88b3e0082dd..c26e100fcaf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
int i = 0;
struct dml2_pmo_instance *pmo = in_out->instance;
+ unsigned int base_list_size = 0;
+ const struct dml2_pmo_pstate_strategy *base_list = NULL;
+ unsigned int *expanded_list_size = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_list = NULL;
+
pmo->soc_bb = in_out->soc_bb;
pmo->ip_caps = in_out->ip_caps;
pmo->mpc_combine_limit = 2;
@@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
pmo->options = in_out->options;
/* generate permutations of p-state configs from base strategy list */
- for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
- switch (i) {
+ for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
+ switch (i+1) {
case 1:
- DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_1_display,
- base_strategy_list_1_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_1_display;
+ base_list_size = base_strategy_list_1_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
+
break;
case 2:
- DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_2_display,
- base_strategy_list_2_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_2_display;
+ base_list_size = base_strategy_list_2_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
+
break;
case 3:
- DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_3_display,
- base_strategy_list_3_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_3_display;
+ base_list_size = base_strategy_list_3_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
+
break;
case 4:
- DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_4_display,
- base_strategy_list_4_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_4_display;
+ base_list_size = base_strategy_list_4_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
+
break;
}
+
+ DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+
+ /* populate list */
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_list,
+ base_list_size,
+ i + 1,
+ expanded_list,
+ expanded_list_size);
}
return true;
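
The rewritten loop above replaces four copies of the expand call with a single one: each switch arm now only selects a base list, preferring a caller-supplied override (from the new dml2_pmo_options fields) when both the pointer and the count are set. A compilable sketch of just that selection rule, using hypothetical reduced types:

#include <stdio.h>

#define MAX_DISPLAYS 4

struct strategy { int method; };

/* Hypothetical reduction of dml2_pmo_options: per-display-count override
 * lists, where a NULL pointer or zero count means "use the built-in list". */
struct options {
	const struct strategy *override_lists[MAX_DISPLAYS];
	unsigned int num_overrides[MAX_DISPLAYS];
};

static const struct strategy builtin_1_display[] = { { 1 }, { 2 } };

int main(void)
{
	struct options opts = { 0 };
	const struct strategy *base_list;
	unsigned int base_list_size;
	int i = 0; /* zero-based display-count index, as in the new loop */

	/* Same rule as the diff: take the override only when both the
	 * list pointer and its size are set. */
	if (opts.override_lists[i] && opts.num_overrides[i]) {
		base_list = opts.override_lists[i];
		base_list_size = opts.num_overrides[i];
	} else {
		base_list = builtin_1_display;
		base_list_size = 2;
	}

	printf("%u strategies, first method %d\n",
	       base_list_size, base_list[0].method);
	return 0;
}
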
@@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
return synchronizable;
}
-static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
+static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta)
{
- return fams2_meta->contention_delay_otg_vlines +
- fams2_meta->method_subvp.programming_delay_otg_vlines +
- fams2_meta->method_subvp.phantom_vtotal +
- fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- fams2_meta->dram_clk_change_blackout_otg_vlines;
+ return pstate_meta->contention_delay_otg_vlines +
+ pstate_meta->method_subvp.programming_delay_otg_vlines +
+ pstate_meta->method_subvp.phantom_vtotal +
+ pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ pstate_meta->blackout_otg_vlines;
}
static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
@@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
- if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
+ if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) {
return false;
}
/* check rr is within bounds */
- if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
+ if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
return false;
}
/* check required stretch is allowed */
if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
- stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
+ stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
return false;
}
}
@@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
{
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_plane_parameters *plane_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
unsigned int microschedule_vlines;
unsigned int i;
unsigned int mcaches_per_plane;
@@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (stream_descriptor->overrides.disable_subvp) {
return false;
}
- microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);
+ microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]);
/* block if using an interlaced timing */
if (stream_descriptor->timing.interlaced) {
@@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
* 2) refresh rate must be within the allowed bounds
*/
if (microschedule_vlines >= stream_descriptor->timing.v_active ||
- (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
+ (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
return false;
}
}
@@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta
}
static void build_method_scheduling_params(
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
- struct dml2_fams2_meta *stream_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta,
+ struct dml2_pstate_meta *stream_pstate_meta)
{
- stream_method_fams2_meta->allow_time_us =
- (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
- stream_fams2_meta->otg_vline_time_us;
- if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
+ stream_method_pstate_meta->allow_time_us =
+ (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) *
+ stream_pstate_meta->otg_vline_time_us;
+ if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) {
/* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/
- stream_method_fams2_meta->disallow_time_us = 0.0;
+ stream_method_pstate_meta->disallow_time_us = 0.0;
} else {
- stream_method_fams2_meta->disallow_time_us =
- stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
+ stream_method_pstate_meta->disallow_time_us =
+ stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us;
}
}
-static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
+static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL;
switch (stream_pstate_method) {
case dml2_pstate_method_vactive:
case dml2_pstate_method_fw_vactive_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common;
break;
case dml2_pstate_method_vblank:
case dml2_pstate_method_fw_vblank_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common;
break;
case dml2_pstate_method_fw_svp:
case dml2_pstate_method_fw_svp_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common;
break;
case dml2_pstate_method_fw_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common;
break;
case dml2_pstate_method_reserved_hw:
case dml2_pstate_method_reserved_fw:
@@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
case dml2_pstate_method_count:
case dml2_pstate_method_na:
default:
- stream_method_fams2_meta = NULL;
+ stream_method_pstate_meta = NULL;
}
- return stream_method_fams2_meta;
+ return stream_method_pstate_meta;
}
static bool is_timing_group_schedulable(
@@ -1288,10 +1311,10 @@ static bool is_timing_group_schedulable(
const struct display_configuation_with_meta *display_cfg,
const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
- struct dml2_fams2_per_method_common_meta *group_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *group_pstate_meta)
{
unsigned int i;
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta;
unsigned int base_stream_idx = 0;
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1305,31 +1328,31 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
+ if (!stream_method_pstate_meta)
return false;
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
- group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
+ group_pstate_meta->period_us = stream_method_pstate_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
+ if (!stream_method_pstate_meta)
continue;
- if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
}
- if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) {
/* set group allow end to smaller otg vline */
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
}
/* check waveform still has positive width */
- if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) {
/* timing group is not schedulable */
return false;
}
@@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable(
}
/* calculate the rest of the meta */
- build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);
+ build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]);
- return group_fams2_meta->allow_time_us > 0.0 &&
- group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
+ return group_pstate_meta->allow_time_us > 0.0 &&
+ group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}
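
[editor's note] Conceptually, the group check above is an interval intersection: each synchronized stream contributes an [allow_start, allow_end) range in OTG vlines, and the group stays schedulable only while the intersection is non-empty. A self-contained sketch of that core step, assuming nothing beyond the logic visible in the diff (the helper name and values are hypothetical):

	#include <stdbool.h>

	/* Reduced form of the group allow-window intersection (sketch only). */
	static bool windows_intersect(const int *starts, const int *ends, int n,
				      int *out_start, int *out_end)
	{
		int s = starts[0], e = ends[0];

		for (int i = 1; i < n; i++) {
			if (s < starts[i])
				s = starts[i];  /* later start wins */
			if (e > ends[i])
				e = ends[i];    /* earlier end wins */
			if (s >= e)
				return false;   /* window collapsed: not schedulable */
		}
		*out_start = s;
		*out_end = e;
		return true;
	}
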
static bool is_config_schedulable(
@@ -1354,7 +1377,7 @@ static bool is_config_schedulable(
double max_allow_delay_us = 0.0;
- memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
+ memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta));
memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);
/* search for a general solution to the schedule */
@@ -1369,12 +1392,12 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
}
- max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
+ max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us;
}
if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
@@ -1391,8 +1414,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
- double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
+ double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
+ double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
@@ -1410,19 +1433,19 @@ static bool is_config_schedulable(
* other display, or when >2 streams continue to halve the remaining allow time.
*/
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
- if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
+ if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) {
/* this timing group always allows */
continue;
}
- double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
+ double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us;
for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
/* stream can't overlap itself */
- if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
+ if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) {
max_allow_time_us = math_min2(
- s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
- (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
+ s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us,
+ (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2);
if (max_allow_time_us < 0.0) {
/* failed, exit early */
@@ -1450,8 +1473,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
- double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
+ double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
+ double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
@@ -1470,7 +1493,7 @@ static bool is_config_schedulable(
unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
- if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
+ if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us ||
(s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
@@ -1492,18 +1515,18 @@ static bool is_config_schedulable(
/* default period_0 > period_1 */
unsigned int lrg_idx = 0;
unsigned int sml_idx = 1;
- if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
+ if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) {
/* period_0 < period_1 */
lrg_idx = 1;
sml_idx = 0;
}
- period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
- shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
- max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
- max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
+ period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us;
+ shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
+ max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us;
+ max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us;
if (shift_per_period > 0.0 &&
- shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
+ shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us &&
max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
schedulable = true;
}
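
[editor's note] A worked instance of the drift math in this two-display branch, with entirely hypothetical inputs (a 60 Hz and a 144 Hz timing group, 6000 us of disallow on the long-period group, 5000 us of allow on the short one); the formulas match the code above:

	#include <math.h>

	/* Hypothetical numbers; returns ~6000 us worst-case allow delay. */
	static double example_max_allow_delay_us(void)
	{
		double lrg_period_us = 1e6 / 60.0;    /* 16666.7 us */
		double sml_period_us = 1e6 / 144.0;   /*  6944.4 us */
		double period_ratio = lrg_period_us / sml_period_us;  /* 2.4 */
		/* relative drift of the allow windows per long-period frame */
		double shift_per_period =
			sml_period_us * (period_ratio - floor(period_ratio)); /* ~2777.8 us */
		double max_shift_us = 6000.0 - 5000.0; /* lrg disallow - sml allow */

		/* frames needed to realign, scaled back into time */
		return max_shift_us / shift_per_period * lrg_period_us;
	}
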
@@ -1646,22 +1669,22 @@ static int get_vactive_pstate_margin(const struct display_configuation_with_meta
return min_vactive_margin_us;
}
-static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
+static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
unsigned char i;
- unsigned int max_vactive_fill_us = 0;
+ int max_vactive_fill_us = 0;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us)
- max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us;
+ if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us)
+ max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
return max_vactive_fill_us;
}
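
[editor's note] The plane_mask walks here and elsewhere in the file rely on small bitfield helpers. A minimal sketch of the assumed semantics — the names match the call sites, but these bodies are illustrative, not the in-tree implementations:

	#include <stdbool.h>

	static inline bool is_bit_set_in_bitfield(unsigned int bitfield, unsigned int bit)
	{
		return (bitfield & (1u << bit)) != 0;
	}

	static inline void set_bit_in_bitfield(unsigned int *bitfield, unsigned int bit)
	{
		*bitfield |= (1u << bit);
	}
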
-static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
+static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo,
struct display_configuation_with_meta *display_config,
int stream_index)
{
@@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
/* worst case all other streams require some programming at the same time, 0 if only 1 stream */
unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
@@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(display_config->display_config.num_streams - 1);
/* common */
- stream_fams2_meta->valid = true;
- stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
- stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
- stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->nom_vtotal * timing->h_total);
- stream_fams2_meta->nom_frame_time_us =
- (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
- stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
+ stream_pstate_meta->valid = true;
+ stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
+ stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
+ stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->nom_vtotal * timing->h_total);
+ stream_pstate_meta->nom_frame_time_us =
+ (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us;
+ stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
} else {
/* assume min of 48Hz */
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
(48000000.0 * stream_descriptor->timing.h_total) * 1e9);
}
} else {
- stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
- }
- stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->max_vtotal * timing->h_total);
- stream_fams2_meta->max_frame_time_us =
- (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
-
- stream_fams2_meta->scheduling_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->contention_delay_otg_vlines =
- (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal;
+ }
+ stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->max_vtotal * timing->h_total);
+ stream_pstate_meta->max_frame_time_us =
+ (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us;
+
+ stream_pstate_meta->scheduling_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->contention_delay_otg_vlines =
+ (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us);
/* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
- stream_fams2_meta->allow_to_target_delay_otg_vlines =
- (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
- stream_fams2_meta->min_allow_width_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->allow_to_target_delay_otg_vlines =
+ (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1;
+ stream_pstate_meta->min_allow_width_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us);
/* this value should account for urgent latency */
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
+ stream_pstate_meta->blackout_otg_vlines =
(unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
- stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->otg_vline_time_us);
/* scheduling params should be built based on the worst case for allow_time:disallow_time */
/* vactive */
if (display_config->display_config.num_streams == 1) {
/* for single stream, guarantee at least an instant of allow */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
- (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
+ (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us);
}
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us;
- if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
- timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline =
+ timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
} else {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0;
}
- stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
+ stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
- stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
- stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start;
+ stream_pstate_meta->method_vblank.common.allow_end_otg_vline =
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1;
+ stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta);
/* subvp */
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.phantom_vactive =
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.phantom_vactive =
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
stream_info->phantom_min_v_active;
- stream_fams2_meta->method_subvp.phantom_vfp =
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.phantom_vfp =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines;
/* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/
- stream_fams2_meta->method_subvp.phantom_vtotal =
+ stream_pstate_meta->method_subvp.phantom_vtotal =
stream_info->phantom_v_startup +
- stream_fams2_meta->method_subvp.phantom_vfp +
+ stream_pstate_meta->method_subvp.phantom_vfp +
1 +
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vactive;
- stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_pstate_meta->method_subvp.common.allow_start_otg_vline =
stream_descriptor->timing.v_blank_end +
- stream_fams2_meta->contention_delay_otg_vlines +
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vtotal +
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
+ stream_pstate_meta->contention_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vtotal +
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
+ stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta);
/* drr */
- stream_fams2_meta->method_drr.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->vblank_start +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_drr.common.allow_start_otg_vline =
+ stream_pstate_meta->vblank_start +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal * 2 +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- }
- stream_fams2_meta->method_drr.common.allow_end_otg_vline =
- stream_fams2_meta->method_drr.stretched_vtotal -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal * 2 +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
+ }
+ stream_pstate_meta->method_drr.common.allow_end_otg_vline =
+ stream_pstate_meta->method_drr.stretched_vtotal -
+ stream_pstate_meta->blackout_otg_vlines;
+ build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta);
}
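
[editor's note] A worked pass through the common timing arithmetic at the top of this builder, using standard CEA-861 1080p60 numbers (illustrative only; the formulas are the ones above):

	/* 1080p60: h_total 2200, v_total 1125, pixel clock 148.5 MHz. */
	static void example_stream_timing(void)
	{
		double h_total = 2200.0, v_total = 1125.0, pixel_clock_khz = 148500.0;

		double otg_vline_time_us = h_total / pixel_clock_khz * 1000.0; /* ~14.81 us */
		double nom_refresh_rate_hz =
			pixel_clock_khz * 1000.0 / (v_total * h_total);        /* 60.0 Hz */
		double nom_frame_time_us = v_total * otg_vline_time_us;       /* ~16667 us */
		(void)nom_refresh_rate_hz;
		(void)nom_frame_time_us;
	}
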
static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
@@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
int stream_index)
{
struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
stream_svp_meta->valid = true;
/* PMO FAMS2 precalculates these values */
- stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
- stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
- stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
+ stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp;
+ stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal;
}
bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
@@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
/* FAMS2 meta */
- build_fams2_meta_per_stream(pmo, display_config, stream_index);
+ build_pstate_meta_per_stream(pmo, display_config, stream_index);
/* SVP meta */
build_subvp_meta_per_stream(pmo, display_config, stream_index);
@@ -1939,9 +1962,6 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
display_config->stage3.stream_svp_meta[stream_index].valid = false;
-
- display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false;
- display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0;
}
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1974,7 +1994,6 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
-
}
}
}
@@ -2040,7 +2059,6 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
-
}
}
}
@@ -2055,6 +2073,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
plane = &display_config->display_config.plane_descriptors[plane_index];
+
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
@@ -2076,8 +2095,8 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2097,8 +2116,8 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2144,9 +2163,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
/* copy FAMS2 meta */
if (success) {
display_config->stage3.fams2_required = fams2_required;
- memcpy(&display_config->stage3.stream_fams2_meta,
- &scratch->pmo_dcn4.stream_fams2_meta,
- sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
+ memcpy(&display_config->stage3.stream_pstate_meta,
+ &scratch->pmo_dcn4.stream_pstate_meta,
+ sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES);
}
return success;
@@ -2188,12 +2207,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
return false;
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
- struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index];
if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
- get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
+ get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 6baab7ad6ecc..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 55d2464365d0..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 7218de1824cc..b90f6263cd85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e17b5ceba447..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
index e13b0c5939b0..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..5e14d85821e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
index 14d0ae03dce6..14d0ae03dce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
index 4a7c4c62111e..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
index 53bd8602f9ef..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
index 611c80f4f1bf..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index d52aa82283b3..1a6c0727cd2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -152,7 +152,7 @@ struct core_plane_support_info {
int active_latency_hiding_us;
int mall_svp_size_requirement_ways;
int nominal_vblank_pstate_latency_hiding_us;
- unsigned int dram_change_vactive_det_fill_delay_us;
+ int vactive_det_fill_delay_us[dml2_pstate_type_count];
};
struct core_stream_support_info {
@@ -209,6 +209,7 @@ struct dml2_core_mode_support_result {
unsigned int uclk_pstate_supported;
unsigned int fclk_pstate_supported;
+ struct dml2_core_internal_watermarks watermarks;
} global;
struct {
@@ -255,56 +256,70 @@ struct dml2_implicit_svp_meta {
unsigned long v_front_porch;
};
-struct dml2_fams2_per_method_common_meta {
+struct dml2_pstate_per_method_common_meta {
/* generic params */
- unsigned int allow_start_otg_vline;
- unsigned int allow_end_otg_vline;
+ int allow_start_otg_vline;
+ int allow_end_otg_vline;
/* scheduling params */
double allow_time_us;
double disallow_time_us;
double period_us;
};
-struct dml2_fams2_meta {
+struct dml2_pstate_meta {
bool valid;
double otg_vline_time_us;
- unsigned int scheduling_delay_otg_vlines;
- unsigned int vertical_interrupt_ack_delay_otg_vlines;
- unsigned int allow_to_target_delay_otg_vlines;
- unsigned int contention_delay_otg_vlines;
- unsigned int min_allow_width_otg_vlines;
- unsigned int nom_vtotal;
- unsigned int vblank_start;
+ int scheduling_delay_otg_vlines;
+ int vertical_interrupt_ack_delay_otg_vlines;
+ int allow_to_target_delay_otg_vlines;
+ int contention_delay_otg_vlines;
+ int min_allow_width_otg_vlines;
+ int nom_vtotal;
+ int vblank_start;
double nom_refresh_rate_hz;
double nom_frame_time_us;
- unsigned int max_vtotal;
+ int max_vtotal;
double min_refresh_rate_hz;
double max_frame_time_us;
- unsigned int dram_clk_change_blackout_otg_vlines;
+ int blackout_otg_vlines;
+ int max_allow_delay_otg_vlines;
+ double nom_vblank_time_us;
struct {
double max_vactive_det_fill_delay_us;
- unsigned int max_vactive_det_fill_delay_otg_vlines;
- struct dml2_fams2_per_method_common_meta common;
+ double vactive_latency_hiding_us;
+ double reserved_vblank_required_us;
+ int max_vactive_det_fill_delay_otg_vlines;
+ int reserved_blank_required_vlines;
+ struct dml2_pstate_per_method_common_meta common;
} method_vactive;
struct {
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_vblank;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int df_throttle_delay_otg_vlines;
- unsigned int prefetch_to_mall_delay_otg_vlines;
+ int programming_delay_otg_vlines;
+ int df_throttle_delay_otg_vlines;
+ int prefetch_to_mall_delay_otg_vlines;
unsigned long phantom_vactive;
unsigned long phantom_vfp;
unsigned long phantom_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_subvp;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int stretched_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ int programming_delay_otg_vlines;
+ int stretched_vtotal;
+ struct dml2_pstate_per_method_common_meta common;
} method_drr;
};
+/* mask of synchronized timings by stream index */
+struct dml2_pmo_synchronized_timing_groups {
+ unsigned int num_timing_groups;
+ unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
+ bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
+ double group_line_time_us[DML2_MAX_PLANES];
+};
+
struct dml2_optimization_stage3_state {
bool performed;
bool success;
@@ -319,7 +334,7 @@ struct dml2_optimization_stage3_state {
// Meta-data for FAMS2
bool fams2_required;
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
int min_clk_index_for_latency;
};
@@ -472,6 +487,7 @@ struct dml2_core_scratch {
};
struct dml2_core_instance {
+ enum dml2_project_id project_id;
struct dml2_mcg_min_clock_table *minimum_clock_table;
struct dml2_core_internal_state_inputs inputs;
struct dml2_core_internal_state_intermediates intermediates;
@@ -619,6 +635,12 @@ struct dml2_pmo_optimize_for_stutter_in_out {
#define PMO_DCN4_MAX_NUM_VARIANTS 2
#define PMO_DCN4_MAX_BASE_STRATEGIES 10
+struct dml2_scheduling_check_locals {
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
+};
+
struct dml2_pmo_scratch {
union {
struct {
@@ -648,7 +670,7 @@ struct dml2_pmo_scratch {
// Stores all the implicit SVP meta information indexed by stream index of the display
// configuration under inspection, built at optimization stage init
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
unsigned int num_stutter_candidates;
@@ -663,7 +685,7 @@ struct dml2_pmo_scratch {
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
- struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
double group_phase_offset[DML2_MAX_PLANES];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index 4cfe64aa8492..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
index 1538b708d8be..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
index 7ca7f2a743c2..7ca7f2a743c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
index 140ec01545db..55b3e3ca54f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
@@ -23,7 +23,7 @@
* Authors: AMD
*
*/
-
+
#ifndef __DML2_INTERNAL_TYPES_H__
#define __DML2_INTERNAL_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index c59f825cfae9..66040c877d68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -24,6 +24,7 @@
*
*/
+
#include "dml2_dc_types.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
index 9d64851f54e7..9d64851f54e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
index ef693f608d59..ef693f608d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
index e83e05248592..e83e05248592 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index 3b866e876bf4..d834cb595afa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
break;
+
case dml_project_dcn401:
out->pct_ideal_fabric_bw_after_urgent = 76; //67;
out->max_avg_sdp_bw_use_normal_percent = 75; //80;
@@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->in_states->state_array[1].dcfclk_mhz = 1434.0;
p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock;
break;
+
+
case dml_project_dcn401:
p->in_states->num_states = 2;
transactions_per_mem_clock = 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
index d764773938f4..d764773938f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
index 9a33158b63bf..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
index 04fcfe637119..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 9deb03a18ccc..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
index c384e141cebc..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
index 17f0972b1af7..17f0972b1af7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
index f7d30b47beff..d459f93cf40b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
@@ -31,3 +31,4 @@
*/
#include "os_types.h"
#include "cmntypes.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
index 00d22e542469..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
index bf491cf0582d..bf491cf0582d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
index 2a2f84e07ca8..7fadbe6d7af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+
#ifndef __DML_LOGGING_H__
#define __DML_LOGGING_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 01480a04f85e..ce91e5d28956 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base)
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+
+ dpp_base->cursor_offload = false;
}
@@ -484,10 +486,12 @@ void dpp1_set_cursor_position(
cur_en = 0; /* not visible beyond top edge*/
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
@@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias;
+ dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale;
}
}
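
[editor's note] The pattern running through these cursor hunks: when cursor programming is offloaded (cursor_offload set), the host skips the direct MMIO writes but still records the intended state in the dpp's pos/att shadow copies, so the values stay readable and replayable. A reduced sketch of that gating, following the diff (REG_UPDATE is DC's register-update macro and needs the usual dpp context; the function name is hypothetical):

	static void sketch_set_cursor_enable(struct dpp *dpp_base, bool enable)
	{
		struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
		uint32_t cur_en = enable ? 1 : 0;

		/* Touch the register only while the host still owns the cursor HW. */
		if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en &&
		    !dpp_base->cursor_offload)
			REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);

		/* Always update the SW shadow so offloaded state can be replayed. */
		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
		dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
	}
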
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index f466182963f7..b12f34345a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1348,7 +1348,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
uint32_t CM_HDR_MULT_COEF; \
- uint32_t CURSOR0_FP_SCALE_BIAS;
+ uint32_t CURSOR0_FP_SCALE_BIAS; \
+ uint32_t OBUF_CONTROL;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1450,7 +1451,6 @@ void dpp1_set_degamma(
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
const struct pwl_params *params);
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 09be2a90cc79..ef4a16117181 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
}
}
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ dpp_reg_state->recout_start = REG_READ(RECOUT_START);
+ dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE);
+ dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_mode = REG_READ(SCL_MODE);
+ dpp_reg_state->cm_control = REG_READ(CM_CONTROL);
+ dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL);
+ dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL);
+ dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL);
+ dpp_reg_state->mpc_size = REG_READ(MPC_SIZE);
+}
+
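
[editor's note] The new dpp30_read_reg_state snapshots raw DPP registers into a dcn_dpp_reg_state for diagnostics; it is wired into the dpp_funcs tables for DCN32/35/401 further down. A hedged sketch of how a debug path might invoke it — the res_pool iteration follows typical DC layout and is an assumption, not part of this patch:

	/* Hypothetical diagnostic loop over all DPPs (sketch only). */
	static void sketch_dump_dpp_regs(struct dc *dc)
	{
		for (int i = 0; i < dc->res_pool->pipe_count; i++) {
			struct dpp *dpp = dc->res_pool->dpps[i];
			struct dcn_dpp_reg_state reg_state = {0};

			if (dpp && dpp->funcs->dpp_read_reg_state)
				dpp->funcs->dpp_read_reg_state(dpp, &reg_state);
		}
	}
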
/* program post scaler CSC block in dpp CM */
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -578,9 +598,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index f236824126e9..d4a70b4379ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha(
void dpp30_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state);
+
bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index fa67e54bf94e..8a5aa5e86850 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_read_reg_state = dpp30_read_reg_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index f7a373a3d70a..977d83bf7741 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv(
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 36187f890d5d..96c2c853de42 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.set_cursor_matrix = dpp401_set_cursor_matrix,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 7aab77b58869..62bf7cea21d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -103,17 +103,21 @@ void dpp401_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -132,10 +136,12 @@ void dpp401_set_cursor_position(
uint32_t cur_en = pos->enable ? 1 : 0;
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp401_set_optional_cursor_attributes(
@@ -145,10 +151,17 @@ void dpp401_set_optional_cursor_attributes(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias;
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 89f0d999bf35..242f1e6f0d8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dsc_reg_state)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ dsc_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL);
+ dsc_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS);
+}
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
@@ -407,7 +415,7 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// Need to find the ceiling value for the slice width
- dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
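
The slice_width change above is ceiling division with the new dsc_padding term folded into the numerator: slice_width = ceil((pic_width + dsc_padding) / num_slices_h). A small self-contained check of that arithmetic (values chosen only for illustration):

#include <assert.h>

/* Ceiling division as used for pps.slice_width above. */
static unsigned int dsc_slice_width(unsigned int pic_width,
				    unsigned int dsc_padding,
				    unsigned int num_slices_h)
{
	return (pic_width + dsc_padding + num_slices_h - 1) / num_slices_h;
}

int main(void)
{
	assert(dsc_slice_width(3840, 0, 3) == 1280); /* divides evenly */
	assert(dsc_slice_width(3840, 8, 3) == 1283); /* padding rounds up */
	return 0;
}
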
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index a9c04fc95bd1..2337c3a97235 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
uint8_t *dsc_packed_pps);
void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dsc_reg_state);
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 6f4f5a3c4861..e712985f7abd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -28,10 +28,11 @@
#include "reg_helper.h"
static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe);
+static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn35_dsc_funcs = {
- .dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -39,6 +40,7 @@ static const struct dsc_funcs dcn35_dsc_funcs = {
.dsc_disable = dsc2_disable,
.dsc_disconnect = dsc2_disconnect,
.dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc35_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -110,3 +112,31 @@ void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable)
{
REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable);
}
+
+static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
+{
+ dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
+
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
+
+ dsc_enc_caps->lb_bit_depth = 13;
+ dsc_enc_caps->is_block_pred_supported = true;
+
+ dsc_enc_caps->color_formats.bits.RGB = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
+
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
+
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
+
+ dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
+ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
+}
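
For reference, the max_total_throughput_mps expression above encodes three pixels per DSCCLK cycle: with max_dscclk_khz in kHz, max_dscclk_khz * 3 / 1000 gives megapixels per second, so a 1200000 kHz (1.2 GHz) DSCCLK yields 3600 MP/s.
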
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 7bd92ae8b13e..c1bdbb38c690 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -26,6 +26,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
.dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
+ .dsc_read_reg_state = dsc2_read_reg_state,
};
/* Macro definitions for REG_SET macros */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index b0bd1f9425b5..81c83d5fe042 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -41,6 +41,7 @@ struct dsc_config {
enum dc_color_depth color_depth; /* Bits per component */
bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
+ uint32_t dsc_padding;
};
@@ -65,6 +66,10 @@ struct dcn_dsc_state {
uint32_t dsc_opp_source;
};
+struct dcn_dsc_reg_state {
+ uint32_t dsc_top_control;
+ uint32_t dscc_interrupt_control_status;
+};
/* DSC encoder capabilities
* They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps.
@@ -99,6 +104,7 @@ struct dsc_enc_caps {
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+ void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dsc_reg_state);
bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index e7e5f6d4778e..181a93dc46e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,33 +440,15 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
-void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes)
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0],
- DET0_SIZE, &target_det_sizes[0]);
-
- REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1],
- DET1_SIZE, &target_det_sizes[1]);
-
- REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2],
- DET2_SIZE, &target_det_sizes[2]);
-
- REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3],
- DET3_SIZE, &target_det_sizes[3]);
-
-}
-
-uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub)
-{
- struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t compbuf_config_error = 0;
-
- REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR,
- &compbuf_config_error);
-
- return compbuf_config_error;
+ hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL);
+ hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL);
+ hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL);
+ hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL);
+ hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL);
}
static const struct hubbub_funcs hubbub30_funcs = {
@@ -486,8 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
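
Because hubbub3_read_reg_state snapshots whole registers where the removed helpers used REG_GET/REG_GET_2 per field, a consumer that still wants the current and target DET sizes now decodes them from the raw dword. A sketch of that decode, with mask/shift values invented for illustration (the real DET0_SIZE/DET0_SIZE_CURRENT layout comes from the generated register headers):

#include <stdint.h>

/* Hypothetical field layout; real values come from the ASIC headers. */
#define DET0_SIZE_MASK           0x000001FFu
#define DET0_SIZE_SHIFT          0
#define DET0_SIZE_CURRENT_MASK   0x01FF0000u
#define DET0_SIZE_CURRENT_SHIFT  16

static uint32_t det0_target_size(uint32_t det0_ctrl)
{
	return (det0_ctrl & DET0_SIZE_MASK) >> DET0_SIZE_SHIFT;
}

static uint32_t det0_current_size(uint32_t det0_ctrl)
{
	return (det0_ctrl & DET0_SIZE_CURRENT_MASK) >> DET0_SIZE_CURRENT_SHIFT;
}
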
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index 49a469969d36..9e14de3ccaee 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,10 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
-void hubbub3_get_det_sizes(struct hubbub *hubbub,
- uint32_t *curr_det_sizes,
- uint32_t *target_det_sizes);
-
-uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub);
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index cdb20251a154..5a03758e3de6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -933,8 +933,8 @@ int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
+ if (hubbub->funcs->dchvm_init)
+ hubbub->funcs->dchvm_init(hubbub);
return NUM_VMID;
}
@@ -1071,8 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn21_dchvm_init,
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 4d4ca6d77bbd..237331b35378 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -1037,8 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index a443722a8632..43ba399f4822 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -549,6 +549,55 @@ void hubbub35_init(struct hubbub *hubbub)
memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
}
+void dcn35_dchvm_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
+
+ //Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ //Poll until RIOMMU_ACTIVE = 1
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ // Disable gating and memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 1);
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 1,
+ HVM_DISPCLK_G_GATE_DIS, 1,
+ HVM_DCFCLK_R_GATE_DIS, 1,
+ HVM_DCFCLK_G_GATE_DIS, 1);
+
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+
+ //Enable memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
+ hubbub->riommu_active = true;
+ }
+}
+
/*static void hubbub35_set_request_limit(struct hubbub *hubbub,
int memory_channel_count,
int words_per_channel)
@@ -589,8 +638,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn35_dchvm_init,
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
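
The RIOMMU_ACTIVE wait in dcn35_dchvm_init is a bounded hand-rolled poll (up to 100 iterations, 5 us apart, roughly 500 us total) before the later REG_WAIT handles prefetch completion. The same loop reduced to a standalone sketch, with read_riommu_active() standing in for the REG_GET above:

#include <stdbool.h>

/* Stand-ins for the register accessor and delay used in the driver. */
extern bool read_riommu_active(void);
extern void udelay(unsigned long usecs);

/* Poll until the bit sets, giving up after ~100 * 5 us. */
static bool wait_riommu_active(void)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (read_riommu_active())
			return true;
		udelay(5);
	}
	return false;
}
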
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
index 23fecf88556c..9f65fff1bd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
@@ -168,4 +168,5 @@ void dcn35_program_compbuf_size(struct hubbub *hubbub,
unsigned int compbuf_size_kb, bool safe_to_increase);
void dcn35_init_crb(struct hubbub *hubbub);
void hubbub35_init(struct hubbub *hubbub);
+void dcn35_dchvm_init(struct hubbub *hubbub);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index a36273a52880..d11afd1ce72a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,8 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 9b026600b90e..6378e3fd7249 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp)
{
memset(&hubp->pos, 0, sizeof(hubp->pos));
memset(&hubp->att, 0, sizeof(hubp->att));
+ hubp->cursor_offload = false;
}
void hubp1_program_surface_config(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index cf2eb9793008..f2571076fc50 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -105,7 +105,9 @@
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
SRI(HUBP_CLK_CNTL, HUBP, id),\
- SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -251,7 +253,19 @@
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
uint32_t HUBP_CLK_CNTL; \
- uint32_t HUBPRET_READ_LINE_VALUE
+ uint32_t HUBPRET_READ_LINE_VALUE; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \
+ uint32_t HUBPRET_INTERRUPT; \
+ uint32_t HUBPRET_MEM_PWR_CTRL; \
+ uint32_t HUBPRET_MEM_PWR_STATUS; \
+ uint32_t HUBPRET_READ_LINE_CTRL0; \
+ uint32_t HUBPRET_READ_LINE_CTRL1; \
+ uint32_t HUBPRET_READ_LINE0; \
+ uint32_t HUBPRET_READ_LINE1; \
+ uint32_t HUBPREQ_MEM_PWR_CTRL; \
+ uint32_t HUBPREQ_MEM_PWR_STATUS
+
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -688,6 +702,123 @@ struct dcn_fl_regs_st {
uint32_t lut_fl_mode;
uint32_t lut_fl_format;
};
+struct dcn_hubp_reg_state {
+ uint32_t hubp_cntl;
+ uint32_t mall_config;
+ uint32_t mall_sub_vp;
+ uint32_t hubp_req_size_config;
+ uint32_t hubp_req_size_config_c;
+ uint32_t vmpg_config;
+ uint32_t addr_config;
+ uint32_t pri_viewport_dimension;
+ uint32_t pri_viewport_dimension_c;
+ uint32_t pri_viewport_start;
+ uint32_t pri_viewport_start_c;
+ uint32_t sec_viewport_dimension;
+ uint32_t sec_viewport_dimension_c;
+ uint32_t sec_viewport_start;
+ uint32_t sec_viewport_start_c;
+ uint32_t surface_config;
+ uint32_t tiling_config;
+ uint32_t clk_cntl;
+ uint32_t mall_status;
+ uint32_t measure_win_ctrl_dcfclk;
+ uint32_t measure_win_ctrl_dppclk;
+
+ uint32_t blank_offset_0;
+ uint32_t blank_offset_1;
+ uint32_t cursor_settings;
+ uint32_t dcn_cur0_ttu_cntl0;
+ uint32_t dcn_cur0_ttu_cntl1;
+ uint32_t dcn_cur1_ttu_cntl0;
+ uint32_t dcn_cur1_ttu_cntl1;
+ uint32_t dcn_dmdat_vm_cntl;
+ uint32_t dcn_expansion_mode;
+ uint32_t dcn_global_ttu_cntl;
+ uint32_t dcn_surf0_ttu_cntl0;
+ uint32_t dcn_surf0_ttu_cntl1;
+ uint32_t dcn_surf1_ttu_cntl0;
+ uint32_t dcn_surf1_ttu_cntl1;
+ uint32_t dcn_ttu_qos_wm;
+ uint32_t dcn_vm_mx_l1_tlb_cntl;
+ uint32_t dcn_vm_system_aperture_high_addr;
+ uint32_t dcn_vm_system_aperture_low_addr;
+ uint32_t dcsurf_flip_control;
+ uint32_t dcsurf_flip_control2;
+ uint32_t dcsurf_primary_meta_surface_address;
+ uint32_t dcsurf_primary_meta_surface_address_c;
+ uint32_t dcsurf_primary_meta_surface_address_high;
+ uint32_t dcsurf_primary_meta_surface_address_high_c;
+ uint32_t dcsurf_primary_surface_address;
+ uint32_t dcsurf_primary_surface_address_c;
+ uint32_t dcsurf_primary_surface_address_high;
+ uint32_t dcsurf_primary_surface_address_high_c;
+ uint32_t dcsurf_secondary_meta_surface_address;
+ uint32_t dcsurf_secondary_meta_surface_address_c;
+ uint32_t dcsurf_secondary_meta_surface_address_high;
+ uint32_t dcsurf_secondary_meta_surface_address_high_c;
+ uint32_t dcsurf_secondary_surface_address;
+ uint32_t dcsurf_secondary_surface_address_c;
+ uint32_t dcsurf_secondary_surface_address_high;
+ uint32_t dcsurf_secondary_surface_address_high_c;
+ uint32_t dcsurf_surface_control;
+ uint32_t dcsurf_surface_earliest_inuse;
+ uint32_t dcsurf_surface_earliest_inuse_c;
+ uint32_t dcsurf_surface_earliest_inuse_high;
+ uint32_t dcsurf_surface_earliest_inuse_high_c;
+ uint32_t dcsurf_surface_flip_interrupt;
+ uint32_t dcsurf_surface_inuse;
+ uint32_t dcsurf_surface_inuse_c;
+ uint32_t dcsurf_surface_inuse_high;
+ uint32_t dcsurf_surface_inuse_high_c;
+ uint32_t dcsurf_surface_pitch;
+ uint32_t dcsurf_surface_pitch_c;
+ uint32_t dst_after_scaler;
+ uint32_t dst_dimensions;
+ uint32_t dst_y_delta_drq_limit;
+ uint32_t flip_parameters_0;
+ uint32_t flip_parameters_1;
+ uint32_t flip_parameters_2;
+ uint32_t flip_parameters_3;
+ uint32_t flip_parameters_4;
+ uint32_t flip_parameters_5;
+ uint32_t flip_parameters_6;
+ uint32_t hubpreq_mem_pwr_ctrl;
+ uint32_t hubpreq_mem_pwr_status;
+ uint32_t nom_parameters_0;
+ uint32_t nom_parameters_1;
+ uint32_t nom_parameters_2;
+ uint32_t nom_parameters_3;
+ uint32_t nom_parameters_4;
+ uint32_t nom_parameters_5;
+ uint32_t nom_parameters_6;
+ uint32_t nom_parameters_7;
+ uint32_t per_line_delivery;
+ uint32_t per_line_delivery_pre;
+ uint32_t prefetch_settings;
+ uint32_t prefetch_settings_c;
+ uint32_t ref_freq_to_pix_freq;
+ uint32_t uclk_pstate_force;
+ uint32_t vblank_parameters_0;
+ uint32_t vblank_parameters_1;
+ uint32_t vblank_parameters_2;
+ uint32_t vblank_parameters_3;
+ uint32_t vblank_parameters_4;
+ uint32_t vblank_parameters_5;
+ uint32_t vblank_parameters_6;
+ uint32_t vmid_settings_0;
+
+ uint32_t hubpret_control;
+ uint32_t hubpret_interrupt;
+ uint32_t hubpret_mem_pwr_ctrl;
+ uint32_t hubpret_mem_pwr_status;
+ uint32_t hubpret_read_line_ctrl0;
+ uint32_t hubpret_read_line_ctrl1;
+ uint32_t hubpret_read_line_status;
+ uint32_t hubpret_read_line_value;
+ uint32_t hubpret_read_line0;
+ uint32_t hubpret_read_line1;
+};
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
@@ -718,7 +849,6 @@ struct dcn_hubp_state {
uint32_t hubp_cntl;
uint32_t flip_control;
};
-
struct dcn10_hubp {
struct hubp base;
struct dcn_hubp_state state;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 91259b896e03..92288de4cc10 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes(
hubp->curs_attr = *attr;
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ }
hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
hubp->att.SURFACE_ADDR = attr->address.low_part;
@@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position(
cur_en = 0; /* not visible beyond top edge*/
if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0;
+
+ if (cur_en && cursor_not_programmed)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
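
The hunk above swaps the REG_READ(CURSOR_SURFACE_ADDRESS) check for a test of the shadowed address, which keeps working when firmware owns the register: enabling the cursor while the shadow shows no surface was ever programmed forces set_cursor_attributes first. That decision in isolation (types and names illustrative):

struct cursor_att_shadow {
	unsigned int surface_addr;
	unsigned int surface_addr_high;
};

/* Reprogram attributes before enabling if the shadow shows no surface yet. */
static int needs_attribute_reprogram(const struct cursor_att_shadow *att,
				     int cur_en)
{
	int cursor_not_programmed =
		att->surface_addr == 0 && att->surface_addr_high == 0;

	return cur_en && cursor_not_programmed;
}
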
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index f325db555102..7062e6653062 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -145,7 +145,8 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0
+ uint32_t VMID_SETTINGS_0;\
+ uint32_t DST_Y_DELTA_DRQ_LIMIT
/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
@@ -176,7 +177,10 @@
uint32_t HUBP_3DLUT_CONTROL;\
uint32_t HUBP_3DLUT_DLG_PARAM;\
uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\
- uint32_t DCHUBP_MCACHEID_CONFIG
+ uint32_t DCHUBP_MCACHEID_CONFIG;\
+ uint32_t DCHUBP_MALL_SUB_VP;\
+ uint32_t DCHUBP_ADDR_CONFIG;\
+ uint32_t HUBP_MALL_STATUS
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e2740482e1cf..08ea0a1b9e7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -73,8 +73,6 @@
* On any mode switch, if the new reg values are smaller than the current values,
* then update the regs with the new values.
*
- * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
- *
*/
void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 556214b2227d..0cc6f4558989 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp)
}
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG);
+ reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP);
+ reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG);
+ reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C);
+ reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG);
+ reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG);
+ reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION);
+ reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C);
+ reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START);
+ reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C);
+ reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION);
+ reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C);
+ reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START);
+ reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C);
+ reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG);
+ reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG);
+ reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL);
+ reg_state->mall_status = REG_READ(HUBP_MALL_STATUS);
+ reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK);
+ reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK);
+
+ reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0);
+ reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1);
+ reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS);
+ reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0);
+ reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1);
+ reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0);
+ reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1);
+ reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL);
+ reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE);
+ reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL);
+ reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0);
+ reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1);
+ reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0);
+ reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1);
+ reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM);
+ reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL);
+ reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR);
+ reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR);
+ reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+ reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2);
+ reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_meta_surface_address_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL);
+ reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE);
+ reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C);
+ reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH);
+ reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT);
+ reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE);
+ reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C);
+ reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH);
+ reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH);
+ reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C);
+ reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER);
+ reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS);
+ reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT);
+ reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0);
+ reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1);
+ reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2);
+ reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3);
+ reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4);
+ reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5);
+ reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6);
+ reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL);
+ reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS);
+ reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0);
+ reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1);
+ reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2);
+ reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3);
+ reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4);
+ reg_state->nom_parameters_5 = REG_READ(NOM_PARAMETERS_5);
+ reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6);
+ reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7);
+ reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY);
+ reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE);
+ reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS);
+ reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C);
+ reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ);
+ reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
+ reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0);
+ reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1);
+ reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2);
+ reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3);
+ reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4);
+ reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5);
+ reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6);
+ reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0);
+ reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL);
+ reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT);
+ reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL);
+ reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS);
+ reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0);
+ reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1);
+ reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS);
+ reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE);
+ reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0);
+ reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1);
+}
+
void hubp3_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -505,30 +625,6 @@ void hubp3_init(struct hubp *hubp)
hubp_reset(hubp);
}
-uint32_t hubp3_get_current_read_line(struct hubp *hubp)
-{
- uint32_t read_line = 0;
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_GET(HUBPRET_READ_LINE_VALUE,
- PIPE_READ_LINE,
- &read_line);
-
- return read_line;
-}
-
-unsigned int hubp3_get_underflow_status(struct hubp *hubp)
-{
- uint32_t hubp_underflow = 0;
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_GET(DCHUBP_CNTL,
- HUBP_UNDERFLOW_STATUS,
- &hubp_underflow);
-
- return hubp_underflow;
-}
-
static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -558,8 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index 842f4eb72cc8..c767e9f4f9b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -296,6 +296,8 @@ void hubp3_dmdata_set_attributes(
void hubp3_read_state(struct hubp *hubp);
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
+
void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 47101847c2b7..189045f85039 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -110,9 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index a5f23bb2a76a..a781085b046b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes(
uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
uint32_t cursor_height = attr->height;
uint32_t cursor_size = cursor_width * cursor_height;
-
- hubp->curs_attr = *attr;
-
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ bool use_mall_for_cursor;
switch (attr->color_format) {
case CURSOR_MODE_MONO:
@@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes(
cursor_size *= 8;
break;
}
+ use_mall_for_cursor = cursor_size > 16384;
+
+ hubp->curs_attr = *attr;
- if (cursor_size > 16384)
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
- else
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor);
+ }
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+
+ hubp->cur_rect.w = attr->width;
+ hubp->cur_rect.h = attr->height;
+
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
+ hubp->use_mall_for_cursor = use_mall_for_cursor;
}
void hubp32_init(struct hubp *hubp)
{
@@ -206,9 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp32_construct(
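
The use_mall_for_cursor threshold compares the cursor footprint in bytes against 16384: cursor_size starts as the width rounded up to a 64-pixel pitch times the height, and the color_format switch then scales it to bytes (e.g. x8 for 64-bit FP premultiplied). A worked sketch with a simplified bytes-per-pixel argument:

#include <stdbool.h>
#include <stdint.h>

/* Simplified bpp: 4 for 32-bit ARGB, 8 for 64-bit FP premultiplied, etc. */
static bool cursor_needs_mall(uint32_t width, uint32_t height, uint32_t bpp)
{
	uint32_t cursor_width = ((width + 63) / 64) * 64; /* pitch-align */
	uint32_t cursor_size = cursor_width * height * bpp;

	return cursor_size > 16384;
}

/*
 * 64x64 ARGB8888: 64 * 64 * 4 = 16384 bytes -> fits, no MALL.
 * 128x128 ARGB8888: 128 * 128 * 4 = 65536 bytes -> routed through MALL.
 */
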
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index b140808f21af..79c583e258c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp3_read_state,
+ .hubp_read_reg_state = hubp3_read_reg_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp35_init,
@@ -218,9 +219,6 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
.hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 0fcbc6a35be6..f01eae50d02f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -783,21 +783,23 @@ void hubp401_cursor_set_position(
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, x_pos,
- CURSOR_Y_POSITION, y_pos);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
-
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
@@ -1071,9 +1073,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
.hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
- .hubp_get_underflow_status = hubp3_get_underflow_status,
- .hubp_get_current_read_line = hubp3_get_current_read_line,
- .hubp_get_det_config_error = hubp31_get_det_config_error,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index fdabbeec8ffa..4570b8016de5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
#include "dcn32/dcn32_hubp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
#define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\
SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 24184b4eb352..8fe399939220 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
}
+static void
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_encoder_control encoder_control = {0};
+
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+
+ bios->funcs->encoder_control(bios, &encoder_control);
+}
+
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -671,7 +685,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- link_hwss->setup_stream_attribute(pipe_ctx);
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -689,6 +702,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
early_control = lane_count;
tg->funcs->set_early_control(tg, early_control);
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -1086,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1122,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx || !pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1196,6 +1218,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1581,6 +1606,51 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+ uint8_t bit_depth;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ bit_depth = 0;
+ break;
+ case COLOR_DEPTH_666:
+ bit_depth = 6;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ bit_depth = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bit_depth = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bit_depth = 16;
+ break;
+ }
+
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
+
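
dce110_select_crtc_source flattens display_color_depth to the integer bpc the VBIOS call expects; note that COLOR_DEPTH_UNDEFINED maps to 0 while any unlisted depth falls through default: into the 8 bpc case. The same mapping condensed (assumes the dc_color_depth enum from the DC headers):

/* Condensed equivalent of the switch above; unknown depths fall back to
 * 8 bpc, matching the default: fallthrough into COLOR_DEPTH_888. */
static uint8_t color_depth_to_bpc(enum dc_color_depth depth)
{
	switch (depth) {
	case COLOR_DEPTH_UNDEFINED: return 0;
	case COLOR_DEPTH_666:       return 6;
	case COLOR_DEPTH_101010:    return 10;
	case COLOR_DEPTH_121212:    return 12;
	case COLOR_DEPTH_141414:    return 14;
	case COLOR_DEPTH_161616:    return 16;
	default:                    return 8; /* COLOR_DEPTH_888 and others */
	}
}
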
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -1600,6 +1670,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1679,7 +1753,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1913,6 +1988,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -2005,9 +2081,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
power_down_all_hw_blocks(dc);
/* DSC could be enabled on eDP during VBIOS post.
- * To clean up dsc blocks if eDP is in link but not active.
+ * Clean up DSC blocks only when every eDP stream has dpms_off set.
*/
- if (edp_link_with_sink && (edp_stream_num == 0))
+ for (i = 0; i < edp_stream_num; i++) {
+ if (!edp_streams[i]->dpms_off) {
+ should_clean_dsc_block = false;
+ }
+ }
+
+ if (should_clean_dsc_block)
clean_up_dsc_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
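
The new gate only cleans up DSC blocks when every detected eDP stream has dpms_off set; a single active stream vetoes the cleanup. The predicate in isolation, made self-contained and with an early exit on the first active stream (names illustrative):

#include <stdbool.h>

struct edp_stream_sketch {
	bool dpms_off;
};

/* True only when every eDP stream is dpms_off; bails on the first active. */
static bool all_edp_dpms_off(struct edp_stream_sketch *streams[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!streams[i]->dpms_off)
			return false;
	return true;
}
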
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index e9fe97f0c4ea..fa62e40a9858 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -2245,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -3090,6 +3090,9 @@ static void dcn10_update_dchubp_dpp(
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
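
abort_cursor_offload_update is wired up through the usual convention for optional hwss hooks: only ASIC generations that support cursor offload populate the pointer, so call sites null-check before invoking. In miniature (names illustrative):

struct hwss_sketch {
	void (*abort_cursor_offload_update)(void *dc, void *pipe_ctx);
};

static void update_cursor(struct hwss_sketch *hwss, void *dc, void *pipe_ctx)
{
	if (hwss->abort_cursor_offload_update)
		hwss->abort_cursor_offload_update(dc, pipe_ctx);
	/* ...then set_cursor_attribute / set_cursor_position as above... */
}
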
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 9477c9f9e196..c8ff8ae85a03 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
+
+ /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+ if (!power_on) {
+ struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+ dpp5->funcs->dpp_force_disable_cursor(dpp5);
+ }
+
break;
default:
BREAK_TO_DEBUGGER();
@@ -1449,7 +1457,7 @@ void dcn20_pipe_control_lock(
!flip_immediate)
dcn20_setup_gsl_group_as_lock(dc, pipe, false);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -1793,6 +1801,9 @@ void dcn20_update_dchubp_dpp(
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -3052,8 +3063,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
- link_hwss->setup_stream_attribute(pipe_ctx);
-
if (dc->res_pool->dccg->funcs->set_pixel_rate_div)
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index e47ed5571dfd..81bcadf5e57e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -53,7 +53,8 @@
#include "link_service.h"
#include "dc_state_priv.h"
-
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
#define DC_LOGGER_INIT(logger)
@@ -1235,44 +1236,47 @@ void dcn30_get_underflow_debug_data(const struct dc *dc,
{
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (tg) {
- uint32_t v_blank_start = 0, v_blank_end = 0;
-
- out_data->otg_inst = tg->inst;
-
- tg->funcs->get_scanoutpos(tg,
- &v_blank_start,
- &v_blank_end,
- &out_data->h_position,
- &out_data->v_position);
-
- out_data->otg_frame_count = tg->funcs->get_frame_count(tg);
-
- out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg);
+ if (hubbub && hubbub->funcs->hubbub_read_reg_state) {
+ hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state);
}
for (int i = 0; i < MAX_PIPES; i++) {
struct hubp *hubp = dc->res_pool->hubps[i];
-
- if (hubp) {
- if (hubp->funcs->hubp_get_underflow_status)
- out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp);
-
- if (hubp->funcs->hubp_in_blank)
- out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp);
-
- if (hubp->funcs->hubp_get_current_read_line)
- out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp);
-
- if (hubp->funcs->hubp_get_det_config_error)
- out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp);
- }
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct output_pixel_processor *opp = dc->res_pool->opps[i];
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct timing_generator *optc = dc->res_pool->timing_generators[i];
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ if (hubp && hubp->funcs->hubp_read_reg_state)
+ hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]);
+
+ if (dpp && dpp->funcs->dpp_read_reg_state)
+ dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]);
+
+ if (opp && opp->funcs->opp_read_reg_state)
+ opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]);
+
+ if (dsc && dsc->funcs->dsc_read_reg_state)
+ dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]);
+
+ if (mpc && mpc->funcs->mpc_read_reg_state)
+ mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]);
+
+ if (optc && optc->funcs->optc_read_reg_state)
+ optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]);
+
+ if (dccg && dccg->funcs->dccg_read_reg_state)
+ dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]);
}
-
- if (hubbub->funcs->get_det_sizes)
- hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes);
-
- if (hubbub->funcs->compbuf_config_error)
- out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub);
-
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index b822f2dffff0..d1ecdb92b072 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -710,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
+ if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX)
+ dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index f925f669f2a4..4ee6ed610de0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index f39292952702..bf19ba65d09a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1061,6 +1061,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 05011061822c..7aa0f452e8f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -364,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -816,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
-
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
@@ -825,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
/*make sure DPPCLK is on*/
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -859,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -878,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
hubp->power_gated = true;
@@ -1592,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc)
if (dc->hwss.hw_block_power_up)
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
+
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ /*
+ * Insert a blank update that advances the write index while leaving
+ * pipe_mask at 0.
+ *
+ * Although the DMU is interlocked with driver full-pipe programming via
+ * the DMU HW lock, a queued cursor update that executes after the
+ * full-pipe programming can cause two problems:
+ *
+ * 1. Stale cursor state is programmed, overwriting the current update.
+ * 2. The firmware cursor update holds the cursor lock, preventing the
+ * current update from being latched atomically in the same frame
+ * as the rest of the update.
+ *
+ * Because the blank update is a no-op, the firmware can skip the
+ * programming entirely.
+ */
+
+ if (dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe);
+
+ if (dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe);
+}
+
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = true;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = true;
+}
+
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = false;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = false;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1]
+ .data.cursor_offload_v1.offload_streams[stream_idx];
+
+ shared_stream->last_write_idx = write_idx;
+
+ cs->offload_streams[stream_idx].write_idx = write_idx;
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx;
+}
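The begin/commit pair above forms a seqcount-style publish protocol over the shared cursor-offload state: begin stamps write_idx_start with the next index, the per-pipe updates fill the payload and set pipe_mask bits, and commit stamps write_idx_finish and publishes the new write_idx. A hedged sketch of the consistency check a consumer could apply; the firmware reader is not part of this patch, and the struct below is illustrative, not the real ABI:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative payload header only; the real dmub payload layout is
 * defined elsewhere and is not shown in this patch.
 */
struct payload_hdr {
	uint32_t write_idx_start;  /* stamped by begin, before the payload is filled */
	uint32_t write_idx_finish; /* stamped by commit, after the payload is filled */
};

static bool payload_is_consistent(const volatile struct payload_hdr *p, uint32_t idx)
{
	/* Equal markers mean the writer finished this index: no torn read. */
	return p->write_idx_start == idx && p->write_idx_finish == idx;
}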
+
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
+}
+
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ dc_dmub_srv_program_cursor_now(dc, pipe);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0b1d6f608edd..1ff41dba556c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
void dcn35_hardware_release(struct dc *dc);
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index f2f16a0bdb4f..5a66c9db2670 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 7c276c319086..2fbc22afb89c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -26,9 +26,11 @@
#include "clk_mgr.h"
#include "dsc.h"
#include "link_service.h"
+#include "custom_float.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_hwseq.h"
@@ -36,6 +38,7 @@
#include "dcn401/dcn401_resource.h"
#include "dc_state_priv.h"
#include "link_enc_cfg.h"
+#include "../hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -200,6 +203,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -968,8 +974,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
- link_hwss->setup_stream_attribute(pipe_ctx);
-
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -1404,9 +1408,9 @@ void dcn401_prepare_bandwidth(struct dc *dc,
}
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, false);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
@@ -1425,9 +1429,9 @@ void dcn401_optimize_bandwidth(
/* enable fams2 if needed */
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, true);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
/* program dchubbub watermarks */
@@ -1466,14 +1470,17 @@ void dcn401_optimize_bandwidth(
}
}
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock)
{
/* use always for now */
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
- if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
+ if (!dc->ctx || !dc->ctx->dmub_srv)
+ return;
+
+ if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc))
return;
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1483,12 +1490,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc,
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
{
- struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
- bool lock = params->fams2_global_control_lock_fast_params.lock;
+ struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
+ bool lock = params->dmub_hw_control_lock_fast_params.lock;
- if (params->fams2_global_control_lock_fast_params.is_required) {
+ if (params->dmub_hw_control_lock_fast_params.is_required) {
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
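The renamed helper keeps the same bracket pattern shown in the prepare/optimize-bandwidth hunks above: take the DMUB HW lock, update the config, release the lock. A hedged sketch of that usage, assuming the dcn401 hwseq declarations are in scope:

/* Usage pattern from the hunks above (sketch; error handling omitted). */
static void update_fams2_config_locked(struct dc *dc, struct dc_state *context,
				       bool enable)
{
	dcn401_dmub_hw_control_lock(dc, context, true);   /* take DMUB HW lock */
	dcn401_fams2_update_config(dc, context, enable);  /* update under lock */
	dcn401_dmub_hw_control_lock(dc, context, false);  /* release */
}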
@@ -1595,6 +1602,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context,
dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
+static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *old_pipe;
+ struct pipe_ctx *new_pipe;
+ struct pipe_ctx *old_opp_heads[MAX_PIPES];
+ struct pipe_ctx *old_otg_master;
+ int old_opp_head_count = 0;
+ int i;
+
+ old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
+
+ if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
+ old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
+ &dc->current_state->res_ctx,
+ old_opp_heads);
+ } else {
+ old_otg_master = NULL;
+ }
+
+ /* Process new DSC configuration if DSC is enabled */
+ if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
+ struct dc_stream_state *stream = otg_master->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int last_dsc_calc = 0;
+ bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
+
+ /* Count ODM pipes */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;
+
+ /* Step 1: Set DTO DSCCLK for main DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ otg_master->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Step 2: Calculate and set DSC config for main DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);
+
+ /* Step 3: Enable main DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, otg_master);
+
+ /* Step 4: Configure and enable ODM DSC blocks */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ if (!odm_pipe->stream_res.dsc)
+ continue;
+
+ /* Set DTO DSCCLK for ODM DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ odm_pipe->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Calculate and set DSC config for ODM DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);
+
+ /* Enable ODM DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
+ }
+
+ /* Step 5: Configure DSC in timing generator */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
+ &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
+ } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
+ /* Disable DSC in OPTC */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);
+
+ hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
+ }
+
+ /* Disable DSC for old pipes that no longer need it */
+ if (old_otg_master && old_otg_master->stream_res.dsc) {
+ for (i = 0; i < old_opp_head_count; i++) {
+ old_pipe = old_opp_heads[i];
+ new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
+
+ /* If old pipe had DSC but new pipe doesn't, disable the old DSC */
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
+ /* Then disconnect DSC block */
+ hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
+ }
+ }
+ }
+}
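For the slice arithmetic above: a stream configured with num_slices_h = 8 split across opp_cnt = 4 ODM segments programs each DSC instance with 8 / 4 = 2 horizontal slices; the ASSERT in the update_dsc_on_stream hunks enforces that the division is exact. A minimal sketch of that per-segment split:

/* Hedged sketch of the per-ODM-segment slice split used above. */
static int dsc_slices_per_segment(int total_slices_h, int opp_cnt)
{
	/* Mirrors ASSERT(num_slices_h % opp_cnt == 0): horizontal slices
	 * must divide evenly across the ODM segments.
	 */
	if (opp_cnt <= 0 || total_slices_h % opp_cnt != 0)
		return -1; /* invalid configuration */

	return total_slices_h / opp_cnt;
}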
+
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_inst[MAX_PIPES] = {0};
+ int opp_head_count;
+ int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
+ int i;
+
+ opp_head_count = resource_get_opp_heads_for_otg_master(
+ otg_master, &context->res_ctx, opp_heads);
+
+ for (i = 0; i < opp_head_count; i++)
+ opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
+
+ /* Add ODM combine/bypass operation to sequence */
+ if (opp_head_count > 1) {
+ hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
+ opp_head_count, odm_slice_width, last_odm_slice_width);
+ } else {
+ hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
+ }
+
+ /* Add OPP operations to sequence */
+ for (i = 0; i < opp_head_count; i++) {
+ /* Add OPP pipe clock control operation */
+ hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
+
+ /* Add OPP program left edge extra pixel operation */
+ hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
+ opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
+ }
+
+ /* Add DSC update operations to sequence */
+ dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
+
+ /* Add blank pixel data operation if needed */
+ if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(
+ dc, otg_master, true, seq_state);
+ }
+}
+
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
@@ -2083,6 +2227,157 @@ void dcn401_program_pipe(
}
}
+/*
+ * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
+ *
+ * Instead of calling the hardware programming functions directly, this
+ * function appends steps to the provided block_sequence array, which can
+ * later be executed as part of hwss_execute_sequence.
+ */
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
+ !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
+ seq_state);
+ }
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
+ /* Step 1: Program global sync */
+ hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ /* Step 2: Wait for VACTIVE state (if not phantom pipe) */
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ /* Step 3: Set VTG params */
+ hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ /* Step 4: Setup vupdate interrupt (if available) */
+ if (hws->funcs.setup_vupdate_interrupt)
+ dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm) {
+ if (hws->funcs.update_odm_sequence)
+ hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (dc->hwss.enable_plane_sequence)
+ dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size) {
+ hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
+
+ if (dc->res_pool->hubbub->funcs->program_det_segments) {
+ hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw)) {
+
+ if (dc->hwss.update_dchubp_dpp_sequence)
+ dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
+
+ hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable)) {
+
+ hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
+ }
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish.
+ * Only do gamma programming when powering on; an internal memcmp
+ * avoids redundant updates on slave planes.
+ */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf) {
+ hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
+ }
+
+ /* If the pipe has been enabled or has a different OPP, we
+ * should reprogram the FMT. This deals with cases where
+ * interaction between MPC and ODM combine on different streams
+ * causes a different pipe to be chosen for ODM combine.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
+
+ hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+
+ hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_opp,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ (struct tg_color){0},
+ false,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+
+}
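The builders above only append steps; nothing is programmed until the sequence is executed (the doc comment names hwss_execute_sequence, whose body is not part of this patch). A hedged sketch of the general shape such an executor could take; the step struct and function names here are illustrative:

/* Illustrative executor; the real hwss_execute_sequence and step layout
 * are not shown in this patch.
 */
union block_sequence_params; /* opaque here; defined by the hwss headers */

struct seq_step {
	void (*execute)(union block_sequence_params *params);
	union block_sequence_params *params;
};

static void execute_block_sequence(struct seq_step *steps, unsigned int num_steps)
{
	unsigned int i;

	/* Steps run in the exact order the builders appended them, so the
	 * ordering constraints (e.g. global sync before the VACTIVE wait)
	 * hold by construction.
	 */
	for (i = 0; i < num_steps; i++)
		steps[i].execute(steps[i].params);
}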
+
void dcn401_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
@@ -2160,7 +2455,6 @@ void dcn401_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -2239,11 +2533,11 @@ void dcn401_program_front_end_for_ctx(
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
- !pipe->top_pipe &&
- pipe->stream &&
- pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
- dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
}
@@ -2355,7 +2649,6 @@ void dcn401_post_unlock_program_front_end(
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
-
/* Only program the MALL registers after all the main and phantom pipes
* are done programming.
*/
@@ -2669,3 +2962,1084 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
+
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+ p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Check and set DC_IP_REQUEST_CNTL if needed */
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+ }
+
+ /* DPP power gating control */
+ hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);
+
+ /* HUBP power gating control */
+ hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);
+
+ /* HUBP reset */
+ hwss_add_hubp_reset(seq_state, hubp);
+
+ /* DPP reset */
+ hwss_add_dpp_reset(seq_state, dpp);
+
+ /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
+
+ /* DPP root clock control */
+ hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
+}
+
+/* Trigger HW to start disconnecting the plane from the stream on the next vsync, using the block sequence */
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+
+ /* Already reset */
+ if (mpcc_to_remove == NULL)
+ return;
+
+ /* Step 1: Remove MPCC from MPC tree */
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);
+
+ /* Phantom pipes have OTG disabled by default, so MPCC_STATUS will never
+ * assert idle; don't wait for MPCC_IDLE in the programming sequence.
+ */
+ if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
+ /* Step 2: Set MPCC disconnect pending flag */
+ hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
+ }
+
+ /* Step 3: Set optimized required flag */
+ hwss_add_dc_set_optimized_required(seq_state, dc, true);
+
+ /* Step 4: Disconnect HUBP if function exists */
+ if (hubp->funcs->hubp_disconnect)
+ hwss_add_hubp_disconnect(seq_state, hubp);
+
+ /* Step 5: Verify pstate change high if debug sanity checks are enabled */
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state)
+{
+ struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = stream->output_color_space;
+ enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ struct rect odm_slice_src;
+
+ if (stream->link->test_pattern_enabled)
+ return;
+
+ /* get opp dpg blank color */
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (blank) {
+ /* Set ABM immediate disable */
+ hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
+
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
+ } else {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ }
+
+ odm_pipe = pipe_ctx;
+
+ /* Set display pattern generator for all ODM pipes */
+ while (odm_pipe->next_odm_pipe) {
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ /* Set display pattern generator for final ODM pipe */
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ /* Handle ABM level setting when not blanking */
+ if (!blank) {
+ if (stream_res->abm) {
+ /* Set pipe for ABM */
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ /* Set ABM level */
+ hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
+ }
+ }
+}
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ int i_wb, i_pipe;
+
+ if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
+ return;
+
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+ /* Get direct pointer to writeback info */
+ struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
+ int mpcc_inst = -1;
+
+ if (wb_info->wb_enabled) {
+ /* Get the MPCC instance for writeback_source_plane */
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
+ mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+
+ if (mpcc_inst == -1) {
+ /* Disable writeback pipe and disconnect from MPCC
+ * if source plane has been removed
+ */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ continue;
+ }
+
+ ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* Writeback pipe already enabled, only need to update */
+ dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ }
+ }
+}
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update DWBC with new parameters */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Configure MCIF_WB buffer settings */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+
+ /* Configure MCIF_WB arbitration */
+ hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+
+ /* Enable MCIF_WB */
+ hwss_add_mcif_wb_enable(seq_state, mcif_wb);
+
+ /* Set DWB MUX to connect writeback to MPCC */
+ hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);
+
+ /* Enable DWBC */
+ hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
+}
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Disable DWBC */
+ hwss_add_dwbc_disable(seq_state, dwb);
+
+ /* Disable DWB MUX */
+ hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
+
+ /* Disable MCIF_WB */
+ hwss_add_mcif_wb_disable(seq_state, mcif_wb);
+}
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update writeback pipe */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Update MCIF_WB buffer settings if needed */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+}
+
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* Return if a group is already assigned: GSL set up for a vsync
+ * flip is unassigned afterwards, so it can't be "left over".
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; /* invalid case */
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; /* if not in use, just return */
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
+ hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+}
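find_free_gsl_group above is a first-fit allocator: with gsl_0 busy and gsl_1 free it returns 2, and it returns 0 when all three groups are taken (caught by the ASSERT in the enable path). The same logic as a self-contained sketch, using a plain array instead of the res_pool bookkeeping fields:

#include <stdbool.h>

/* Hedged sketch of the first-fit GSL group allocation above. */
static int find_free_gsl_group_sketch(const bool in_use[3])
{
	int i;

	for (i = 0; i < 3; i++)
		if (!in_use[i])
			return i + 1; /* groups are numbered 1..3 */

	return 0; /* 0 means no free group */
}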
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
+
+ /* Wait for MPCC disconnect */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);
+
+ /* In the flip-immediate-with-pipe-splitting case GSL is used for
+ * synchronization, so it must be disabled when the plane is disabled.
+ */
+ if (pipe_ctx->stream_res.gsl_group != 0)
+ dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
+
+ /* Update HUBP mall sel */
+ if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
+ hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);
+
+ /* Set flip control GSL */
+ hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* HUBP clock control */
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* DPP clock control */
+ hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);
+
+ /* Plane atomic power down */
+ if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
+ dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp, seq_state);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
+
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled */
+ if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
+ hwss_add_disable_phantom_crtc(seq_state, tg);
+}
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state)
+{
+ struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ /* Wait until all DPP pipes in the current MPC blending tree have
+ * completed their double-buffered disconnection before resetting the OPP.
+ */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
+
+ if (dsc) {
+ bool *is_ungated = NULL;
+ /* Check DSC power gate status */
+ if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
+ hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
+
+ /* Seamless-update specific: postpone the non-double-buffered DSCCLK
+ * disable logic to the post-unlock sequence, after the DSC is
+ * disconnected from the OPP but not yet power gated.
+ */
+
+ /* DSC wait disconnect pending clear */
+ hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
+
+ /* DSC disable */
+ hwss_add_dsc_disable(seq_state, dsc, is_ungated);
+
+ /* Set reference DSCCLK */
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
+ }
+}
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
+}
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
+ return;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+
+ /* Step 1: DPP root clock control - enable clock */
+ if (hws->funcs.dpp_root_clock_control)
+ hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 2: Enable DC IP request (if needed) */
+ if (hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+
+ /* Step 3: DPP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
+ hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 4: HUBP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
+ hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ /* Step 5: Disable DC IP request (restore state) */
+ if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ /* Step 6: HUBP clock control - enable DCFCLK */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
+
+ /* Step 7: HUBP initialization */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
+ hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
+
+ /* Step 8: OPP pipe clock control - enable */
+ if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
+ hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
+
+ /* Step 9: VM system aperture settings */
+ if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
+ hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
+ dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
+ }
+
+ /* Step 10: Flip interrupt setup */
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
+ hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
+ }
+}
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dccg *dccg = dc->res_pool->dccg;
+ bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
+
+ if (!hubp || !dpp || !plane_state)
+ return;
+
+ /* Step 1: DPP DPPCLK control */
+ if (pipe_ctx->update_flags.bits.dppclk)
+ hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);
+
+ /* Step 2: DCCG update DPP DTO */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
+
+ /* Step 3: HUBP VTG selection */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);
+
+ /* Step 4: HUBP setup (choose setup2 or setup) */
+ if (hubp->funcs->hubp_setup2) {
+ hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync, &pipe_ctx->stream->timing);
+ } else if (hubp->funcs->hubp_setup) {
+ hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
+ }
+ }
+
+ /* Step 5: Set unbounded requesting */
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req);
+
+ /* Step 6: HUBP interdependent setup */
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2)
+ hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
+ else if (hubp->funcs->hubp_setup_interdependent)
+ hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
+ }
+
+ /* Step 7: DPP setup - input CSC and format setup */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);
+
+ /* Step 8: DPP cursor matrix setup */
+ if (dpp->funcs->set_cursor_matrix) {
+ hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
+ &plane_state->cursor_csc_color_matrix);
+ }
+
+ /* Step 9: DPP program bias and scale */
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
+ }
+
+ /* Step 10: MPCC updates */
+ if (pipe_ctx->update_flags.bits.mpcc ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.global_alpha_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change) {
+
+ /* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */
+ if (hws->funcs.update_mpcc_sequence)
+ hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ /* Step 11: DPP scaler setup */
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ /* Step 12: HUBP viewport programming */
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+ hwss_add_hubp_mem_program_viewport(seq_state, hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+ /* Step 13: HUBP program mcache if available */
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);
+
+ /* Step 14: Cursor attribute setup */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+
+ hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
+
+ hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
+
+ /* Step 15: Cursor position setup */
+ hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);
+
+ /* Step 16: Cursor SDR white level */
+ if (dc->hwss.set_cursor_sdr_white_level)
+ hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 17: Gamut remap and output CSC */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.gamut_remap ||
+ plane_state->update_flags.bits.gamut_remap_change ||
+ pipe_ctx->stream->update_flags.bits.out_csc) {
+
+ /* Gamut remap */
+ hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);
+
+ /* Output CSC */
+ hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
+ }
+
+ /* Step 18: HUBP surface configuration */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hwss_add_hubp_program_surface_config(seq_state, hubp,
+ plane_state->format, &plane_state->tiling_info, size,
+ plane_state->rotation, &plane_state->dcc,
+ plane_state->horizontal_mirror, 0);
+ hubp->power_gated = false;
+ }
+
+ /* Step 19: Update plane address (with SubVP support) */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update) {
+
+ /* SubVP save surface address if needed */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
+ hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
+ &pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
+ }
+
+ /* Update plane address */
+ hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 20: HUBP set blank - enable plane */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_hubp_set_blank(seq_state, hubp, false);
+
+ /* Step 21: Phantom HUBP post enable */
+ if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
+ hwss_add_phantom_hubp_post_enable(seq_state, hubp);
+}
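+
+/*
+ * Illustrative sketch (an assumption, not part of this change) of the
+ * pattern the hwss_add_*() helpers above follow: bounds-check the
+ * caller-owned step array, fill in one entry, and advance the shared
+ * counter. For hwss_add_hubp_set_blank(), roughly:
+ *
+ *     if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ *             struct block_sequence *step =
+ *                     &seq_state->steps[(*seq_state->num_steps)++];
+ *
+ *             step->func = HUBP_SET_BLANK;
+ *             step->params.hubp_set_blank_params.hubp = hubp;
+ *             step->params.hubp_set_blank_params.blank = blank;
+ *     }
+ */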
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (!hubp || !pipe_ctx->plane_state)
+ return;
+
+ per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
+
+ /* Initialize blend configuration */
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_gain = 0xff;
+
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
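+
+ /* Summary of the selection above:
+ * per-pixel + global alpha -> PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN
+ * per-pixel only -> PER_PIXEL_ALPHA
+ * otherwise -> GLOBAL_ALPHA
+ */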
+
+ if (pipe_ctx->plane_state->global_alpha)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
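+ /* 0x1f000 is 1.0 (unity gain), matching the sign/6-bit-exponent/
+ * 12-bit-mantissa custom-float encoding worked through in
+ * dcn401_set_hdr_multiplier_sequence() below.
+ */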
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+
+ if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+
+ /* MPCC instance is equal to HUBP instance */
+ mpcc_id = hubp->inst;
+
+ /* Step 1: Update blending if no full update needed */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+
+ /* Update blending configuration */
+ hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
+
+ /* Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+ return;
+ }
+
+ /* Step 2: Get existing MPCC for DPP */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+
+ /* Step 3: Remove MPCC if being used */
+ if (new_mpcc != NULL) {
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
+ } else {
+ /* Step 4: Assert MPCC idle (debug only) */
+ if (dc->debug.sanity_checks)
+ hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
+ }
+
+ /* Step 5: Insert new plane into MPC tree */
+ hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
+
+ /* Step 6: Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+
+ /* Step 7: Set HUBP OPP and MPCC IDs */
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
+static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+{
+ int i;
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ if (res_pool->hubps[i]->inst == mpcc_inst)
+ return res_pool->hubps[i];
+ }
+ ASSERT(false);
+ return NULL;
+}
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ int mpcc_inst;
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
+ hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
+ }
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ if (hubp)
+ hwss_add_hubp_set_blank(seq_state, hubp, true);
+ }
+ }
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0)
+ start_line = 0;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
+}
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
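+
+ /* Worked example of this custom-float encoding: 1.0 = 1.0 x 2^0, so the
+ * biased exponent field is 0 + 31 = 31 (bias 2^(6-1) - 1) and the
+ * mantissa is 0, giving (31 << 12) = 0x1f000, the default hw_mult above.
+ */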
+
+ if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
+}
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ int i;
+ unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
+ bool cache_cursor = false;
+
+ // Don't force p-state disallow -- the dummy p-state can't be blocked
+
+ // Update the MALL_SEL register for each pipe (broken out from the update_mall_sel call)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
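+ /* cursor_size is now in bytes; flag cursors larger than 16KB so they
+ * are cached in MALL as well.
+ */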
+ if (cursor_size > 16384)
+ cache_cursor = true;
+
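+ /* Assumed MALL_SEL encoding, following dcn32_update_mall_sel():
+ * 0 = disabled, 1 = reserve MALL for the phantom (SubVP) surface,
+ * 2 = cache the full frame.
+ */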
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
+ } else {
+ // Use MALL only when the frame fits in the available cache ways; not supported with PSR, Stereo3D, or TMZ surfaces
+ uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface) ? 2 : 0;
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
+ }
+ }
+ }
+
+ // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
+ hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
+ }
+ }
+}
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high)
+ return;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
+ /* Attempt hardware workaround force recovery */
+ dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
+ }
+}
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp;
+ unsigned int i;
+
+ if (!dc->debug.recovery_enabled)
+ return false;
+
+ /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
+
+ /* Step 3: Set HUBP_DISABLE=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 4: Set HUBP_DISABLE=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, false);
+ }
+ }
+
+ /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
+
+ /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, false);
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 2621b7725267..f78162ab859b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -9,6 +9,7 @@
#include "dc.h"
#include "dc_stream.h"
#include "hw_sequencer_private.h"
+#include "hwss/hw_sequencer.h"
#include "dcn401/dcn401_dccg.h"
struct dc;
@@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock);
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings);
void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
@@ -97,6 +100,11 @@ void dcn401_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
@@ -109,5 +117,97 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
void dcn401_initialize_min_clocks(struct dc *dc);
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable);
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index d6e11b7e4fce..162096ce0bdf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -9,6 +9,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
#include "dcn401/dcn401_hwseq.h"
#include "dcn401_init.h"
@@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_plane_sequence = dcn401_disable_plane_sequence,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
@@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
@@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -95,55 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
- .fams2_global_control_lock = dcn401_fams2_global_control_lock,
+ .dmub_hw_control_lock = dcn401_dmub_hw_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
- .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
+ .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
.detect_pipe_changes = dcn401_detect_pipe_changes,
.enable_plane = dcn20_enable_plane,
+ .enable_plane_sequence = dcn401_enable_plane_sequence,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence,
.get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence,
.update_mpcc = dcn20_update_mpcc,
+ .update_mpcc_sequence = dcn401_update_mpcc_sequence,
.set_input_transfer_func = dcn32_set_input_transfer_func,
.set_output_transfer_func = dcn401_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
+ .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence,
.reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn401_plane_atomic_power_down,
+ .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence,
.update_odm = dcn401_update_odm,
+ .update_odm_sequence = dcn401_update_odm_sequence,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
+ .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
.perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
+ .program_pipe_sequence = dcn401_program_pipe_sequence,
+ .dc_ip_request_cntl = dcn401_dc_ip_request_cntl,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 1723bbcf2c46..8ed9eea40c56 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -31,6 +31,8 @@
#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "inc/core_status.h"
+#include "inc/hw/hw_shared.h"
+#include "dsc/dsc.h"
struct pipe_ctx;
struct dc_state;
@@ -48,6 +50,8 @@ struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
struct dc_underflow_debug_data;
+struct dsc_optc_config;
+struct vm_system_aperture_param;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -62,7 +66,7 @@ struct pipe_control_lock_params {
};
struct set_flip_control_gsl_params {
- struct pipe_ctx *pipe_ctx;
+ struct hubp *hubp;
bool flip_immediate;
};
@@ -148,12 +152,587 @@ struct wait_for_dcc_meta_propagation_params {
const struct pipe_ctx *top_pipe_to_program;
};
-struct fams2_global_control_lock_fast_params {
+struct dmub_hw_control_lock_fast_params {
struct dc *dc;
bool is_required;
bool lock;
};
+struct program_surface_config_params {
+ struct hubp *hubp;
+ enum surface_pixel_format format;
+ struct dc_tiling_info *tiling_info;
+ struct plane_size plane_size;
+ enum dc_rotation_angle rotation;
+ struct dc_plane_dcc_param *dcc;
+ bool horizontal_mirror;
+ int compat_level;
+};
+
+struct program_mcache_id_and_split_coordinate {
+ struct hubp *hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs;
+};
+
+struct program_cursor_update_now_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct hubp_wait_pipe_read_start_params {
+ struct hubp *hubp;
+};
+
+struct apply_update_flags_for_phantom_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct update_phantom_vp_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct set_odm_combine_params {
+ struct timing_generator *tg;
+ int opp_inst[MAX_PIPES];
+ int opp_head_count;
+ int odm_slice_width;
+ int last_odm_slice_width;
+};
+
+struct set_odm_bypass_params {
+ struct timing_generator *tg;
+ const struct dc_crtc_timing *timing;
+};
+
+struct opp_pipe_clock_control_params {
+ struct output_pixel_processor *opp;
+ bool enable;
+};
+
+struct opp_program_left_edge_extra_pixel_params {
+ struct output_pixel_processor *opp;
+ enum dc_pixel_encoding pixel_encoding;
+ bool is_otg_master;
+};
+
+struct dccg_set_dto_dscclk_params {
+ struct dccg *dccg;
+ int inst;
+ int num_slices_h;
+};
+
+struct dsc_set_config_params {
+ struct display_stream_compressor *dsc;
+ struct dsc_config *dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg;
+};
+
+struct dsc_enable_params {
+ struct display_stream_compressor *dsc;
+ int opp_inst;
+};
+
+struct tg_set_dsc_config_params {
+ struct timing_generator *tg;
+ struct dsc_optc_config *dsc_optc_cfg;
+ bool enable;
+};
+
+struct dsc_disconnect_params {
+ struct display_stream_compressor *dsc;
+};
+
+struct dsc_read_state_params {
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state *dsc_state;
+};
+
+struct dsc_calculate_and_set_config_params {
+ struct pipe_ctx *pipe_ctx;
+ struct dsc_optc_config dsc_optc_cfg;
+ bool enable;
+ int opp_cnt;
+};
+
+struct dsc_enable_with_opp_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_tg_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct tg_program_global_sync_params {
+ struct timing_generator *tg;
+ int vready_offset;
+ unsigned int vstartup_lines;
+ unsigned int vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines;
+};
+
+struct tg_wait_for_state_params {
+ struct timing_generator *tg;
+ enum crtc_state state;
+};
+
+struct tg_set_vtg_params_params {
+ struct timing_generator *tg;
+ struct dc_crtc_timing *timing;
+ bool program_fp2;
+};
+
+struct tg_set_gsl_params {
+ struct timing_generator *tg;
+ struct gsl_params gsl;
+};
+
+struct tg_set_gsl_source_select_params {
+ struct timing_generator *tg;
+ int group_idx;
+ uint32_t gsl_ready_signal;
+};
+
+struct setup_vupdate_interrupt_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct tg_setup_vertical_interrupt2_params {
+ struct timing_generator *tg;
+ int start_line;
+};
+
+struct dpp_set_hdr_multiplier_params {
+ struct dpp *dpp;
+ uint32_t hw_mult;
+};
+
+struct program_det_size_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_buffer_size_kb;
+};
+
+struct program_det_segments_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_size;
+};
+
+struct update_dchubp_dpp_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct opp_set_dyn_expansion_params {
+ struct output_pixel_processor *opp;
+ enum dc_color_space color_space;
+ enum dc_color_depth color_depth;
+ enum signal_type signal;
+};
+
+struct opp_program_fmt_params {
+ struct output_pixel_processor *opp;
+ struct bit_depth_reduction_params *fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping;
+};
+
+struct opp_program_bit_depth_reduction_params {
+ struct output_pixel_processor *opp;
+ bool use_default_params;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct opp_set_disp_pattern_generator_params {
+ struct output_pixel_processor *opp;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ struct tg_color solid_color;
+ bool use_solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct set_abm_pipe_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_abm_level_params {
+ struct abm *abm;
+ unsigned int abm_level;
+};
+
+struct set_abm_immediate_disable_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_disp_pattern_generator_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ const struct tg_color *solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct mpc_update_blending_params {
+ struct mpc *mpc;
+ struct mpcc_blnd_cfg blnd_cfg;
+ int mpcc_id;
+};
+
+struct mpc_assert_idle_mpcc_params {
+ struct mpc *mpc;
+ int mpcc_id;
+};
+
+struct mpc_insert_plane_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg;
+ struct mpcc *insert_above_mpcc;
+ int dpp_id;
+ int mpcc_id;
+};
+
+struct mpc_remove_mpcc_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove;
+};
+
+struct opp_set_mpcc_disconnect_pending_params {
+ struct output_pixel_processor *opp;
+ int mpcc_inst;
+ bool pending;
+};
+
+struct dc_set_optimized_required_params {
+ struct dc *dc;
+ bool optimized_required;
+};
+
+struct hubp_disconnect_params {
+ struct hubp *hubp;
+};
+
+struct hubbub_force_pstate_change_control_params {
+ struct hubbub *hubbub;
+ bool enable;
+ bool wait;
+};
+
+struct tg_enable_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct hubp_wait_flip_pending_params {
+ struct hubp *hubp;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct tg_wait_double_buffer_pending_params {
+ struct timing_generator *tg;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct update_force_pstate_params {
+ struct dc *dc;
+ struct dc_state *context;
+};
+
+struct hubbub_apply_dedcn21_147_wa_params {
+ struct hubbub *hubbub;
+};
+
+struct hubbub_allow_self_refresh_control_params {
+ struct hubbub *hubbub;
+ bool allow;
+ bool *disallow_self_refresh_applied;
+};
+
+struct tg_get_frame_count_params {
+ struct timing_generator *tg;
+ unsigned int *frame_count;
+};
+
+struct mpc_set_dwb_mux_params {
+ struct mpc *mpc;
+ int dwb_id;
+ int mpcc_id;
+};
+
+struct mpc_disable_dwb_mux_params {
+ struct mpc *mpc;
+ unsigned int dwb_id;
+};
+
+struct mcif_wb_config_buf_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+ unsigned int dest_height;
+};
+
+struct mcif_wb_config_arb_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_arb_params *mcif_arb_params;
+};
+
+struct mcif_wb_enable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct mcif_wb_disable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct dwbc_enable_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct dwbc_disable_params {
+ struct dwbc *dwb;
+};
+
+struct dwbc_update_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct hubp_update_mall_sel_params {
+ struct hubp *hubp;
+ uint32_t mall_sel;
+ bool cache_cursor;
+};
+
+struct hubp_prepare_subvp_buffering_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_set_blank_en_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_disable_control_params {
+ struct hubp *hubp;
+ bool disable;
+};
+
+struct hubbub_soft_reset_params {
+ struct hubbub *hubbub;
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset);
+ bool reset;
+};
+
+struct hubp_clk_cntl_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_init_params {
+ struct hubp *hubp;
+};
+
+struct hubp_set_vm_system_aperture_settings_params {
+ struct hubp *hubp;
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct hubp_set_flip_int_params {
+ struct hubp *hubp;
+};
+
+struct dpp_dppclk_control_params {
+ struct dpp *dpp;
+ bool dppclk_div;
+ bool enable;
+};
+
+struct disable_phantom_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct dpp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool power_on;
+};
+
+struct hubp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int hubp_inst;
+ bool power_on;
+};
+
+struct hubp_reset_params {
+ struct hubp *hubp;
+};
+
+struct dpp_reset_params {
+ struct dpp *dpp;
+};
+
+struct dpp_root_clock_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool clock_on;
+};
+
+struct dc_ip_request_cntl_params {
+ struct dc *dc;
+ bool enable;
+};
+
+struct dsc_pg_status_params {
+ struct dce_hwseq *hws;
+ int dsc_inst;
+ bool is_ungated;
+};
+
+struct dsc_wait_disconnect_pending_clear_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dsc_disable_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dccg_set_ref_dscclk_params {
+ struct dccg *dccg;
+ int dsc_inst;
+ bool *is_ungated;
+};
+
+struct dccg_update_dpp_dto_params {
+ struct dccg *dccg;
+ int dpp_inst;
+ int dppclk_khz;
+};
+
+struct hubp_vtg_sel_params {
+ struct hubp *hubp;
+ uint32_t otg_inst;
+};
+
+struct hubp_setup2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+ union dml2_global_sync_programming *global_sync;
+ struct dc_crtc_timing *timing;
+};
+
+struct hubp_setup_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest;
+};
+
+struct hubp_set_unbounded_requesting_params {
+ struct hubp *hubp;
+ bool unbounded_req;
+};
+
+struct hubp_setup_interdependent2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+};
+
+struct hubp_setup_interdependent_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+};
+
+struct dpp_set_cursor_matrix_params {
+ struct dpp *dpp;
+ enum dc_color_space color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix;
+};
+
+struct mpc_update_mpcc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct dpp_set_scaler_params {
+ struct dpp *dpp;
+ const struct scaler_data *scl_data;
+};
+
+struct hubp_mem_program_viewport_params {
+ struct hubp *hubp;
+ const struct rect *viewport;
+ const struct rect *viewport_c;
+};
+
+struct hubp_program_mcache_id_and_split_coordinate_params {
+ struct hubp *hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs;
+};
+
+struct abort_cursor_offload_update_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_attribute_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_sdr_white_level_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_output_csc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum dc_color_space colorspace;
+ uint16_t *matrix;
+ int opp_id;
+};
+
+struct hubp_set_blank_params {
+ struct hubp *hubp;
+ bool blank;
+};
+
+struct phantom_hubp_post_enable_params {
+ struct hubp *hubp;
+};
+
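+/*
+ * Each *_params struct above mirrors the argument list of one hardware hook
+ * so that a call can be captured as a block sequence step and replayed later
+ * by the sequence executor.
+ */
+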
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
@@ -173,7 +752,108 @@ union block_sequence_params {
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params;
- struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
+ struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params;
+ struct program_surface_config_params program_surface_config_params;
+ struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate;
+ struct program_cursor_update_now_params program_cursor_update_now_params;
+ struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params;
+ struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params;
+ struct update_phantom_vp_position_params update_phantom_vp_position_params;
+ struct set_odm_combine_params set_odm_combine_params;
+ struct set_odm_bypass_params set_odm_bypass_params;
+ struct opp_pipe_clock_control_params opp_pipe_clock_control_params;
+ struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params;
+ struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params;
+ struct dsc_set_config_params dsc_set_config_params;
+ struct dsc_enable_params dsc_enable_params;
+ struct tg_set_dsc_config_params tg_set_dsc_config_params;
+ struct dsc_disconnect_params dsc_disconnect_params;
+ struct dsc_read_state_params dsc_read_state_params;
+ struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params;
+ struct dsc_enable_with_opp_params dsc_enable_with_opp_params;
+ struct program_tg_params program_tg_params;
+ struct tg_program_global_sync_params tg_program_global_sync_params;
+ struct tg_wait_for_state_params tg_wait_for_state_params;
+ struct tg_set_vtg_params_params tg_set_vtg_params_params;
+ struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params;
+ struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params;
+ struct tg_set_gsl_params tg_set_gsl_params;
+ struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params;
+ struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params;
+ struct program_det_size_params program_det_size_params;
+ struct program_det_segments_params program_det_segments_params;
+ struct update_dchubp_dpp_params update_dchubp_dpp_params;
+ struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params;
+ struct opp_program_fmt_params opp_program_fmt_params;
+ struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params;
+ struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params;
+ struct set_abm_pipe_params set_abm_pipe_params;
+ struct set_abm_level_params set_abm_level_params;
+ struct set_abm_immediate_disable_params set_abm_immediate_disable_params;
+ struct set_disp_pattern_generator_params set_disp_pattern_generator_params;
+ struct mpc_remove_mpcc_params mpc_remove_mpcc_params;
+ struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params;
+ struct dc_set_optimized_required_params dc_set_optimized_required_params;
+ struct hubp_disconnect_params hubp_disconnect_params;
+ struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params;
+ struct tg_enable_crtc_params tg_enable_crtc_params;
+ struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params;
+ struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params;
+ struct update_force_pstate_params update_force_pstate_params;
+ struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params;
+ struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params;
+ struct tg_get_frame_count_params tg_get_frame_count_params;
+ struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params;
+ struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params;
+ struct mcif_wb_config_buf_params mcif_wb_config_buf_params;
+ struct mcif_wb_config_arb_params mcif_wb_config_arb_params;
+ struct mcif_wb_enable_params mcif_wb_enable_params;
+ struct mcif_wb_disable_params mcif_wb_disable_params;
+ struct dwbc_enable_params dwbc_enable_params;
+ struct dwbc_disable_params dwbc_disable_params;
+ struct dwbc_update_params dwbc_update_params;
+ struct hubp_update_mall_sel_params hubp_update_mall_sel_params;
+ struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params;
+ struct hubp_set_blank_en_params hubp_set_blank_en_params;
+ struct hubp_disable_control_params hubp_disable_control_params;
+ struct hubbub_soft_reset_params hubbub_soft_reset_params;
+ struct hubp_clk_cntl_params hubp_clk_cntl_params;
+ struct hubp_init_params hubp_init_params;
+ struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params;
+ struct hubp_set_flip_int_params hubp_set_flip_int_params;
+ struct dpp_dppclk_control_params dpp_dppclk_control_params;
+ struct disable_phantom_crtc_params disable_phantom_crtc_params;
+ struct dpp_pg_control_params dpp_pg_control_params;
+ struct hubp_pg_control_params hubp_pg_control_params;
+ struct hubp_reset_params hubp_reset_params;
+ struct dpp_reset_params dpp_reset_params;
+ struct dpp_root_clock_control_params dpp_root_clock_control_params;
+ struct dc_ip_request_cntl_params dc_ip_request_cntl_params;
+ struct dsc_pg_status_params dsc_pg_status_params;
+ struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params;
+ struct dsc_disable_params dsc_disable_params;
+ struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params;
+ struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params;
+ struct hubp_vtg_sel_params hubp_vtg_sel_params;
+ struct hubp_setup2_params hubp_setup2_params;
+ struct hubp_setup_params hubp_setup_params;
+ struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params;
+ struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params;
+ struct hubp_setup_interdependent_params hubp_setup_interdependent_params;
+ struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params;
+ struct mpc_update_mpcc_params mpc_update_mpcc_params;
+ struct mpc_update_blending_params mpc_update_blending_params;
+ struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params;
+ struct mpc_insert_plane_params mpc_insert_plane_params;
+ struct dpp_set_scaler_params dpp_set_scaler_params;
+ struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params;
+ struct abort_cursor_offload_update_params abort_cursor_offload_update_params;
+ struct set_cursor_attribute_params set_cursor_attribute_params;
+ struct set_cursor_position_params set_cursor_position_params;
+ struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params;
+ struct program_output_csc_params program_output_csc_params;
+ struct hubp_set_blank_params hubp_set_blank_params;
+ struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params;
};
enum block_sequence_func {
@@ -189,13 +869,111 @@ enum block_sequence_func {
DPP_SETUP_DPP,
DPP_PROGRAM_BIAS_AND_SCALE,
DPP_SET_OUTPUT_TRANSFER_FUNC,
+ DPP_SET_HDR_MULTIPLIER,
MPC_UPDATE_VISUAL_CONFIRM,
MPC_POWER_ON_MPC_MEM_PWR,
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
- DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ DMUB_HW_CONTROL_LOCK_FAST,
+ HUBP_PROGRAM_SURFACE_CONFIG,
+ HUBP_PROGRAM_MCACHE_ID,
+ PROGRAM_CURSOR_UPDATE_NOW,
+ HUBP_WAIT_PIPE_READ_START,
+ HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM,
+ HWS_UPDATE_PHANTOM_VP_POSITION,
+ OPTC_SET_ODM_COMBINE,
+ OPTC_SET_ODM_BYPASS,
+ OPP_PIPE_CLOCK_CONTROL,
+ OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL,
+ DCCG_SET_DTO_DSCCLK,
+ DSC_SET_CONFIG,
+ DSC_ENABLE,
+ TG_SET_DSC_CONFIG,
+ DSC_DISCONNECT,
+ DSC_READ_STATE,
+ DSC_CALCULATE_AND_SET_CONFIG,
+ DSC_ENABLE_WITH_OPP,
+ TG_PROGRAM_GLOBAL_SYNC,
+ TG_WAIT_FOR_STATE,
+ TG_SET_VTG_PARAMS,
+ TG_SETUP_VERTICAL_INTERRUPT2,
+ HUBP_PROGRAM_DET_SIZE,
+ HUBP_PROGRAM_DET_SEGMENTS,
+ OPP_SET_DYN_EXPANSION,
+ OPP_PROGRAM_FMT,
+ OPP_PROGRAM_BIT_DEPTH_REDUCTION,
+ OPP_SET_DISP_PATTERN_GENERATOR,
+ ABM_SET_PIPE,
+ ABM_SET_LEVEL,
+ ABM_SET_IMMEDIATE_DISABLE,
+ MPC_REMOVE_MPCC,
+ OPP_SET_MPCC_DISCONNECT_PENDING,
+ DC_SET_OPTIMIZED_REQUIRED,
+ HUBP_DISCONNECT,
+ HUBBUB_FORCE_PSTATE_CHANGE_CONTROL,
+ TG_ENABLE_CRTC,
+ TG_SET_GSL,
+ TG_SET_GSL_SOURCE_SELECT,
+ HUBP_WAIT_FLIP_PENDING,
+ TG_WAIT_DOUBLE_BUFFER_PENDING,
+ UPDATE_FORCE_PSTATE,
+ PROGRAM_MALL_PIPE_CONFIG,
+ HUBBUB_APPLY_DEDCN21_147_WA,
+ HUBBUB_ALLOW_SELF_REFRESH_CONTROL,
+ TG_GET_FRAME_COUNT,
+ MPC_SET_DWB_MUX,
+ MPC_DISABLE_DWB_MUX,
+ MCIF_WB_CONFIG_BUF,
+ MCIF_WB_CONFIG_ARB,
+ MCIF_WB_ENABLE,
+ MCIF_WB_DISABLE,
+ DWBC_ENABLE,
+ DWBC_DISABLE,
+ DWBC_UPDATE,
+ HUBP_UPDATE_MALL_SEL,
+ HUBP_PREPARE_SUBVP_BUFFERING,
+ HUBP_SET_BLANK_EN,
+ HUBP_DISABLE_CONTROL,
+ HUBBUB_SOFT_RESET,
+ HUBP_CLK_CNTL,
+ HUBP_INIT,
+ HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS,
+ HUBP_SET_FLIP_INT,
+ DPP_DPPCLK_CONTROL,
+ DISABLE_PHANTOM_CRTC,
+ DSC_PG_STATUS,
+ DSC_WAIT_DISCONNECT_PENDING_CLEAR,
+ DSC_DISABLE,
+ DCCG_SET_REF_DSCCLK,
+ DPP_PG_CONTROL,
+ HUBP_PG_CONTROL,
+ HUBP_RESET,
+ DPP_RESET,
+ DPP_ROOT_CLOCK_CONTROL,
+ DC_IP_REQUEST_CNTL,
+ DCCG_UPDATE_DPP_DTO,
+ HUBP_VTG_SEL,
+ HUBP_SETUP2,
+ HUBP_SETUP,
+ HUBP_SET_UNBOUNDED_REQUESTING,
+ HUBP_SETUP_INTERDEPENDENT2,
+ HUBP_SETUP_INTERDEPENDENT,
+ DPP_SET_CURSOR_MATRIX,
+ MPC_UPDATE_BLENDING,
+ MPC_ASSERT_IDLE_MPCC,
+ MPC_INSERT_PLANE,
+ DPP_SET_SCALER,
+ HUBP_MEM_PROGRAM_VIEWPORT,
+ ABORT_CURSOR_OFFLOAD_UPDATE,
+ SET_CURSOR_ATTRIBUTE,
+ SET_CURSOR_POSITION,
+ SET_CURSOR_SDR_WHITE_LEVEL,
+ PROGRAM_OUTPUT_CSC,
+ HUBP_SET_LEGACY_TILING_COMPAT_LEVEL,
+ HUBP_SET_BLANK,
+ PHANTOM_HUBP_POST_ENABLE,
/* This must be the last value in this enum, add new ones above */
HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
@@ -205,6 +983,11 @@ struct block_sequence {
enum block_sequence_func func;
};
+struct block_sequence_state {
+ struct block_sequence *steps;
+ unsigned int *num_steps;
+};
+
#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
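+
+/*
+ * Illustrative usage (a sketch, assuming the recorded steps are replayed by
+ * the existing hwss_execute_sequence() dispatcher and backed by the
+ * dc->block_sequence scratch array):
+ *
+ *     struct block_sequence_state seq_state = {
+ *             .steps = dc->block_sequence,
+ *             .num_steps = &dc->block_sequence_steps,
+ *     };
+ *
+ *     dc->block_sequence_steps = 0;
+ *     dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, &seq_state);
+ *     hwss_execute_sequence(dc, dc->block_sequence, dc->block_sequence_steps);
+ */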
struct hw_sequencer_funcs {
@@ -222,6 +1005,8 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -239,6 +1024,10 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
@@ -310,6 +1099,13 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*notify_cursor_offload_drr_update)(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+ void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe);
/* Colour Related */
void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
@@ -452,13 +1248,13 @@ struct hw_sequencer_funcs {
const struct dc_state *new_ctx);
void (*wait_for_dcc_meta_propagation)(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
- void (*fams2_global_control_lock)(struct dc *dc,
+ void (*dmub_hw_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock);
void (*fams2_update_config)(struct dc *dc,
struct dc_state *context,
bool enable);
- void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
+ void (*dmub_hw_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
@@ -471,11 +1267,23 @@ struct hw_sequencer_funcs {
void (*enable_plane)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*enable_plane_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_dchubp_dpp)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*update_dchubp_dpp_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*post_unlock_reset_opp_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
void (*get_underflow_debug_data)(const struct dc *dc,
struct timing_generator *tg,
struct dc_underflow_debug_data *out_data);
@@ -588,4 +1396,630 @@ void hwss_set_ocsc_default(union block_sequence_params *params);
void hwss_subvp_save_surf_addr(union block_sequence_params *params);
+void hwss_program_surface_config(union block_sequence_params *params);
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params);
+
+void hwss_set_odm_combine(union block_sequence_params *params);
+
+void hwss_set_odm_bypass(union block_sequence_params *params);
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params);
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params);
+
+void hwss_blank_pixel_data(union block_sequence_params *params);
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params);
+
+void hwss_dsc_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable(union block_sequence_params *params);
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params);
+
+void hwss_dsc_disconnect(union block_sequence_params *params);
+
+void hwss_dsc_read_state(union block_sequence_params *params);
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params);
+
+void hwss_program_tg(union block_sequence_params *params);
+
+void hwss_tg_program_global_sync(union block_sequence_params *params);
+
+void hwss_tg_wait_for_state(union block_sequence_params *params);
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params);
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params);
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params);
+
+void hwss_program_det_size(union block_sequence_params *params);
+
+void hwss_program_det_segments(union block_sequence_params *params);
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params);
+
+void hwss_opp_program_fmt(union block_sequence_params *params);
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params);
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params);
+
+void hwss_set_abm_pipe(union block_sequence_params *params);
+
+void hwss_set_abm_level(union block_sequence_params *params);
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params);
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params);
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params);
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params);
+
+void hwss_hubp_disconnect(union block_sequence_params *params);
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params);
+
+void hwss_tg_enable_crtc(union block_sequence_params *params);
+
+void hwss_tg_set_gsl(union block_sequence_params *params);
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params);
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params);
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params);
+
+void hwss_update_force_pstate(union block_sequence_params *params);
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params);
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params);
+
+void hwss_tg_get_frame_count(union block_sequence_params *params);
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params);
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params);
+
+void hwss_mcif_wb_enable(union block_sequence_params *params);
+
+void hwss_mcif_wb_disable(union block_sequence_params *params);
+
+void hwss_dwbc_enable(union block_sequence_params *params);
+
+void hwss_dwbc_disable(union block_sequence_params *params);
+
+void hwss_dwbc_update(union block_sequence_params *params);
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params);
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params);
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params);
+
+void hwss_hubp_disable_control(union block_sequence_params *params);
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params);
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params);
+
+void hwss_hubp_init(union block_sequence_params *params);
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params);
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params);
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params);
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params);
+
+void hwss_dsc_pg_status(union block_sequence_params *params);
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params);
+
+void hwss_dsc_disable(union block_sequence_params *params);
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params);
+
+void hwss_dpp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_reset(union block_sequence_params *params);
+
+void hwss_dpp_reset(union block_sequence_params *params);
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params);
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params);
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params);
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params);
+
+void hwss_hubp_setup2(union block_sequence_params *params);
+
+void hwss_hubp_setup(union block_sequence_params *params);
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params);
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params);
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_update_blending(union block_sequence_params *params);
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_insert_plane(union block_sequence_params *params);
+
+void hwss_dpp_set_scaler(union block_sequence_params *params);
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params);
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params);
+
+void hwss_set_cursor_attribute(union block_sequence_params *params);
+
+void hwss_set_cursor_position(union block_sequence_params *params);
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params);
+
+void hwss_program_output_csc(union block_sequence_params *params);
+
+void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params);
+
+void hwss_hubp_set_blank(union block_sequence_params *params);
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params);
+
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock);
+
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp, bool flip_immediate);
+
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state);
+
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream);
+
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id);
+
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int mpcc_id, bool power_on);
+
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index);
+
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *top_pipe_to_program);
+
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width);
+
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *optc, struct dc_crtc_timing *timing);
+
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines);
+
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, enum crtc_state state);
+
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
+
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line);
+
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult);
+
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb);
+
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
+
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, bool enable, bool wait);
+
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size);
+
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth, enum signal_type signal);
+
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm, uint32_t abm_level);
+
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h);
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt);
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove);
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending);
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc);
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required);
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset);
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable);
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height);
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params);
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id);
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id);
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb);
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl);
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal);
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor);
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable);
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset);
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable);
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated);
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated);
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank);
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp);
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable);
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high);
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz);
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst);
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing);
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req);
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs);
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+		struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level);
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix);
+
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id);
+
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id);
+
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context);
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub);
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied);
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count);
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dsc_optc_config *dsc_optc_cfg,
+ bool enable);
+
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master);
+
#endif /* __DC_HW_SEQUENCER_H__ */
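The hwss_add_* declarations above are step builders: each appends one hardware-programming step to a block_sequence_state instead of touching registers immediately, and the *_sequence hook variants introduced in hw_sequencer_private.h below are expected to be assembled from them. A minimal sketch of that shape, assuming a DCN-style hook; the function body and the hw_mult value are illustrative, not taken from this patch:

/* Illustrative only: a sequence variant queues a step rather than
 * programming the DPP directly. Step selection is per-DCN-version. */
static void example_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
		struct block_sequence_state *seq_state)
{
	uint32_t hw_mult = 0x1f000; /* placeholder fixed-point multiplier */

	hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
}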
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 1e2d247fbbac..406db231bc72 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -27,6 +27,7 @@
#define __DC_HW_SEQUENCER_PRIVATE_H__
#include "dc_types.h"
+#include "hw_sequencer.h"
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
@@ -80,7 +81,13 @@ struct hwseq_private_funcs {
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect_sequence)(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*set_input_transfer_func)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -97,6 +104,10 @@ struct hwseq_private_funcs {
void (*blank_pixel_data)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank);
+ void (*blank_pixel_data_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
enum dc_status (*enable_stream_timing)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -105,6 +116,8 @@ struct hwseq_private_funcs {
bool enable);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_blank)(struct dc *dc, struct timing_generator *tg);
void (*disable_vga)(struct dce_hwseq *hws);
@@ -112,6 +125,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_power_down)(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+ void (*plane_atomic_power_down_sequence)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
@@ -140,15 +157,31 @@ struct hwseq_private_funcs {
unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*update_odm_sequence)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
const struct dc_stream_state *stream,
struct dc_state *context);
+ void (*program_all_writeback_pipes_in_tree_sequence)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*s0i3_golden_init_wa)(struct dc *dc);
void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*verify_allow_pstate_change_high_sequence)(struct dc *dc,
+ struct block_sequence_state *seq_state);
void (*program_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*program_pipe_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
void (*dccg_init)(struct dce_hwseq *hws);
bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
@@ -163,6 +196,8 @@ struct hwseq_private_funcs {
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
+ void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
@@ -186,6 +221,7 @@ struct hwseq_private_funcs {
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*dc_ip_request_cntl)(struct dc *dc, bool enable);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d11893f8c916..5ed2cd344804 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -58,8 +58,8 @@
#include "transform.h"
#include "dpp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
struct resource_pool;
struct dc_state;
@@ -274,7 +274,7 @@ struct resource_pool {
/* An array for accessing the link encoder objects that have been created.
* Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA
*/
- struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS];
+ struct link_encoder *link_encoders[MAX_LINK_ENCODERS];
/* Number of DIG link encoder objects created - i.e. number of valid
* entries in link_encoders array.
*/
@@ -514,7 +514,7 @@ struct pipe_ctx {
struct link_enc_cfg_context {
enum link_enc_cfg_mode mode;
struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ enum engine_id link_enc_avail[MAX_LINK_ENCODERS];
struct link_enc_assignment transient_assignments[MAX_PIPES];
};
@@ -526,8 +526,8 @@ struct resource_context {
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
struct link_enc_cfg_context link_enc_cfg_ctx;
- unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS];
- int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS];
+ unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS];
+ int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS];
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
index 45645f9fd86c..7ce2f417f86a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp {
} size;
union reg_cursor_settings_cfg {
struct {
- uint32_t dst_y_offset: 8;
- uint32_t chunk_hdl_adjust: 2;
- uint32_t reserved: 22;
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
} bits;
uint32_t raw;
} settings;
@@ -83,12 +83,34 @@ union reg_cur0_control_cfg {
} bits;
uint32_t raw;
};
+
struct cursor_position_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
};
struct cursor_attribute_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
+ union reg_cur0_fp_scale_bias {
+ struct {
+ uint32_t fp_bias: 16;
+ uint32_t fp_scale: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias;
+ union reg_cur0_fp_scale_bias_g_y {
+ struct {
+ uint32_t fp_bias_g_y: 16;
+ uint32_t fp_scale_g_y: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_g_y;
+ union reg_cur0_fp_scale_bias_rb_crcb {
+ struct {
+ uint32_t fp_bias_rb_crcb: 16;
+ uint32_t fp_scale_rb_crcb: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_rb_crcb;
};
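Each fp_scale_bias union above splits one 32-bit register word into two 16-bit fixed-point fields; on the little-endian, low-bits-first bitfield layout this driver targets, the first-declared field lands in bits [15:0]. A small illustration with arbitrary values:

union reg_cur0_fp_scale_bias v = { .raw = 0 };

v.bits.fp_bias  = 0x0000; /* bits [15:0]  */
v.bits.fp_scale = 0x3c00; /* bits [31:16] */
/* v.raw now reads back as 0x3c000000 */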
struct cursor_attributes_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 61c4d2a7db1c..500a601e99b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -71,6 +71,125 @@ enum pixel_rate_div {
PIXEL_RATE_DIV_NA = 0xF
};
+struct dcn_dccg_reg_state {
+ uint32_t dc_mem_global_pwr_req_cntl;
+ uint32_t dccg_audio_dtbclk_dto_modulo;
+ uint32_t dccg_audio_dtbclk_dto_phase;
+ uint32_t dccg_audio_dto_source;
+ uint32_t dccg_audio_dto0_module;
+ uint32_t dccg_audio_dto0_phase;
+ uint32_t dccg_audio_dto1_module;
+ uint32_t dccg_audio_dto1_phase;
+ uint32_t dccg_cac_status;
+ uint32_t dccg_cac_status2;
+ uint32_t dccg_disp_cntl_reg;
+ uint32_t dccg_ds_cntl;
+ uint32_t dccg_ds_dto_incr;
+ uint32_t dccg_ds_dto_modulo;
+ uint32_t dccg_ds_hw_cal_interval;
+ uint32_t dccg_gate_disable_cntl;
+ uint32_t dccg_gate_disable_cntl2;
+ uint32_t dccg_gate_disable_cntl3;
+ uint32_t dccg_gate_disable_cntl4;
+ uint32_t dccg_gate_disable_cntl5;
+ uint32_t dccg_gate_disable_cntl6;
+ uint32_t dccg_global_fgcg_rep_cntl;
+ uint32_t dccg_gtc_cntl;
+ uint32_t dccg_gtc_current;
+ uint32_t dccg_gtc_dto_incr;
+ uint32_t dccg_gtc_dto_modulo;
+ uint32_t dccg_perfmon_cntl;
+ uint32_t dccg_perfmon_cntl2;
+ uint32_t dccg_soft_reset;
+ uint32_t dccg_test_clk_sel;
+ uint32_t dccg_vsync_cnt_ctrl;
+ uint32_t dccg_vsync_cnt_int_ctrl;
+ uint32_t dccg_vsync_otg0_latch_value;
+ uint32_t dccg_vsync_otg1_latch_value;
+ uint32_t dccg_vsync_otg2_latch_value;
+ uint32_t dccg_vsync_otg3_latch_value;
+ uint32_t dccg_vsync_otg4_latch_value;
+ uint32_t dccg_vsync_otg5_latch_value;
+ uint32_t dispclk_cgtt_blk_ctrl_reg;
+ uint32_t dispclk_freq_change_cntl;
+ uint32_t dp_dto_dbuf_en;
+ uint32_t dp_dto0_modulo;
+ uint32_t dp_dto0_phase;
+ uint32_t dp_dto1_modulo;
+ uint32_t dp_dto1_phase;
+ uint32_t dp_dto2_modulo;
+ uint32_t dp_dto2_phase;
+ uint32_t dp_dto3_modulo;
+ uint32_t dp_dto3_phase;
+ uint32_t dpiaclk_540m_dto_modulo;
+ uint32_t dpiaclk_540m_dto_phase;
+ uint32_t dpiaclk_810m_dto_modulo;
+ uint32_t dpiaclk_810m_dto_phase;
+ uint32_t dpiaclk_dto_cntl;
+ uint32_t dpiasymclk_cntl;
+ uint32_t dppclk_cgtt_blk_ctrl_reg;
+ uint32_t dppclk_ctrl;
+ uint32_t dppclk_dto_ctrl;
+ uint32_t dppclk0_dto_param;
+ uint32_t dppclk1_dto_param;
+ uint32_t dppclk2_dto_param;
+ uint32_t dppclk3_dto_param;
+ uint32_t dprefclk_cgtt_blk_ctrl_reg;
+ uint32_t dprefclk_cntl;
+ uint32_t dpstreamclk_cntl;
+ uint32_t dscclk_dto_ctrl;
+ uint32_t dscclk0_dto_param;
+ uint32_t dscclk1_dto_param;
+ uint32_t dscclk2_dto_param;
+ uint32_t dscclk3_dto_param;
+ uint32_t dtbclk_dto_dbuf_en;
+ uint32_t dtbclk_dto0_modulo;
+ uint32_t dtbclk_dto0_phase;
+ uint32_t dtbclk_dto1_modulo;
+ uint32_t dtbclk_dto1_phase;
+ uint32_t dtbclk_dto2_modulo;
+ uint32_t dtbclk_dto2_phase;
+ uint32_t dtbclk_dto3_modulo;
+ uint32_t dtbclk_dto3_phase;
+ uint32_t dtbclk_p_cntl;
+ uint32_t force_symclk_disable;
+ uint32_t hdmicharclk0_clock_cntl;
+ uint32_t hdmistreamclk_cntl;
+ uint32_t hdmistreamclk0_dto_param;
+ uint32_t microsecond_time_base_div;
+ uint32_t millisecond_time_base_div;
+ uint32_t otg_pixel_rate_div;
+ uint32_t otg0_phypll_pixel_rate_cntl;
+ uint32_t otg0_pixel_rate_cntl;
+ uint32_t otg1_phypll_pixel_rate_cntl;
+ uint32_t otg1_pixel_rate_cntl;
+ uint32_t otg2_phypll_pixel_rate_cntl;
+ uint32_t otg2_pixel_rate_cntl;
+ uint32_t otg3_phypll_pixel_rate_cntl;
+ uint32_t otg3_pixel_rate_cntl;
+ uint32_t phyasymclk_clock_cntl;
+ uint32_t phybsymclk_clock_cntl;
+ uint32_t phycsymclk_clock_cntl;
+ uint32_t phydsymclk_clock_cntl;
+ uint32_t phyesymclk_clock_cntl;
+ uint32_t phyplla_pixclk_resync_cntl;
+ uint32_t phypllb_pixclk_resync_cntl;
+ uint32_t phypllc_pixclk_resync_cntl;
+ uint32_t phyplld_pixclk_resync_cntl;
+ uint32_t phyplle_pixclk_resync_cntl;
+ uint32_t refclk_cgtt_blk_ctrl_reg;
+ uint32_t socclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_psp_cntl;
+ uint32_t symclk32_le_cntl;
+ uint32_t symclk32_se_cntl;
+ uint32_t symclka_clock_enable;
+ uint32_t symclkb_clock_enable;
+ uint32_t symclkc_clock_enable;
+ uint32_t symclkd_clock_enable;
+ uint32_t symclke_clock_enable;
+};
+
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
@@ -81,7 +200,6 @@ struct dccg {
//int audio_dtbclk_khz;/* TODO needs to be removed */
//int ref_dtbclk_khz;/* TODO needs to be removed */
};
-
struct dtbclk_dto_params {
const struct dc_crtc_timing *timing;
int otg_inst;
@@ -214,6 +332,7 @@ struct dccg_funcs {
void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
+ void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
};
#endif //__DAL_DCCG_H__
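The new dccg_read_reg_state hook gives debugging code a one-call snapshot of the registers captured in dcn_dccg_reg_state. A hedged sketch of a dump helper; debug_dump_dccg is a hypothetical name and the two fields logged are arbitrary picks:

/* Illustrative only: guard on the optional hook, then log a couple of
 * the captured DCCG registers. */
static void debug_dump_dccg(struct dccg *dccg)
{
	struct dcn_dccg_reg_state s = {0};

	if (!dccg->funcs->dccg_read_reg_state)
		return; /* hook not implemented on this DCN generation */

	dccg->funcs->dccg_read_reg_state(dccg, &s);
	DC_LOG_DEBUG("DCCG: soft_reset=0x%08x dppclk_ctrl=0x%08x",
		     s.dccg_soft_reset, s.dppclk_ctrl);
}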
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 843a18287c83..1ddfa30411c8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,14 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct dcn_hubbub_reg_state {
+ uint32_t det0_ctrl;
+ uint32_t det1_ctrl;
+ uint32_t det2_ctrl;
+ uint32_t det3_ctrl;
+ uint32_t compbuf_ctrl;
+};
+
struct hubbub_system_latencies {
uint32_t max_latency_ns;
uint32_t avg_latency_ns;
@@ -216,6 +224,8 @@ struct hubbub_funcs {
void (*init_watermarks)(struct hubbub *hubbub);
+ void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
/**
* @program_det_size:
*
@@ -242,17 +252,39 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
- void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes);
- uint32_t (*compbuf_config_error)(struct hubbub *hubbub);
- struct hubbub_perfmon_funcs{
- void (*start_system_latency_measurement)(struct hubbub *hubbub);
- void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies);
- void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub);
- void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps);
- void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params);
- void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns);
+ void (*dchvm_init)(struct hubbub *hubbub);
+
+ struct hubbub_perfmon_funcs {
void (*reset)(struct hubbub *hubbub);
+ void (*start_measuring_max_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_average_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_urgent_ramp_latency_ns)(
+ struct hubbub *hubbub,
+ const struct hubbub_urgent_latency_params *params);
+ uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz);
+ void (*start_measuring_unbounded_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *duration_ns);
+ void (*start_measuring_average_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t min_duration_ns,
+ uint32_t *duration_ns);
} perfmon;
+
+ struct hubbub_qos_funcs {
+ void (*force_display_nominal_profile)(struct hubbub *hubbub);
+ void (*force_display_urgent_profile)(struct hubbub *hubbub);
+ void (*reset_display_qos_profile)(struct hubbub *hubbub);
+ } qos;
};
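The reworked perfmon interface pairs each start_measuring_* entry point with a matching get_* readback. A rough usage sketch; the helper name, the 100 MHz refclk, and the 10 ms settle delay are placeholders, and kernel context (linux/delay.h) is assumed:

/* Illustrative only: start a measurement, let the counters accumulate,
 * then read the result back. */
static uint32_t sample_max_memory_latency_ns(struct hubbub *hubbub)
{
	uint32_t sample_count = 0;

	hubbub->funcs->perfmon.start_measuring_max_memory_latency_ns(hubbub);
	msleep(10);
	return hubbub->funcs->perfmon.get_max_memory_latency_ns(hubbub,
			100 /* refclk_mhz */, &sample_count);
}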
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 1b7c085dc2cc..d88b57d4f512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -65,7 +65,6 @@ union defer_reg_writes {
} bits;
uint32_t raw;
};
-
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -84,6 +83,7 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+ bool cursor_offload;
struct cursor_position_cache_dpp pos;
struct cursor_attribute_cache_dpp att;
@@ -202,6 +202,19 @@ struct dcn_dpp_state {
uint32_t gamcor_mode;
};
+struct dcn_dpp_reg_state {
+ uint32_t recout_start;
+ uint32_t recout_size;
+ uint32_t scl_horz_filter_scale_ratio;
+ uint32_t scl_vert_filter_scale_ratio;
+ uint32_t scl_mode;
+ uint32_t cm_control;
+ uint32_t dpp_control;
+ uint32_t dscl_control;
+ uint32_t obuf_control;
+ uint32_t mpc_size;
+};
+
struct CM_bias_params {
uint32_t cm_bias_cr_r;
uint32_t cm_bias_y_g;
@@ -225,6 +238,8 @@ struct dpp_funcs {
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+ void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state);
+
void (*dpp_reset)(struct dpp *dpp);
void (*dpp_set_scaler)(struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 2b874d2cc61c..a79019365af8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -41,8 +41,8 @@
#include "mem_input.h"
#include "cursor_reg_cache.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -126,11 +126,13 @@ struct hubp {
int mpcc_id;
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
+ bool cursor_offload;
bool power_gated;
struct cursor_position_cache_hubp pos;
struct cursor_attribute_cache_hubp att;
struct cursor_rect cur_rect;
+ bool use_mall_for_cursor;
};
struct surface_flip_registers {
@@ -236,6 +238,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..a61d12ec61bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,12 +44,67 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
+/**
+ * define MAX_DIG_LINK_ENCODERS - maximum number of digital link encoders
+ *
+ * Digital encoders are ENGINE_ID_DIGA...G; there are at most 7,
+ * although not every GPU has that many.
+ */
#define MAX_DIG_LINK_ENCODERS 7
+
+/**
+ * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders
+ *
+ * Analog encoders are ENGINE_ID_DACA/B; there are at most 2,
+ * although not every GPU has that many. Modern GPUs typically
+ * have no analog encoders at all.
+ */
+#define MAX_DAC_LINK_ENCODERS 2
+
+/**
+ * define MAX_LINK_ENCODERS - maximum total number of link encoders
+ *
+ * This includes both analog and digital encoders.
+ */
+#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)
+
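With the values above, the totals work out to MAX_LINKS = 6 + 6 + 4 = 16 and MAX_LINK_ENCODERS = 7 + 2 = 9. A standalone compile-time restatement of that arithmetic, illustrative only:

#include <assert.h>

#define MAX_DPIA          6
#define MAX_CONNECTOR     6
#define MAX_VIRTUAL_LINKS 4
#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)

#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DAC_LINK_ENCODERS 2
#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)

static_assert(MAX_LINKS == 16, "6 DPIA + 6 connectors + 4 virtual");
static_assert(MAX_LINK_ENCODERS == 9, "7 DIG + 2 DAC");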
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
#define MAX_HPO_DP2_LINK_ENCODERS 4
+/* Pipe topology snapshot structures */
+#define MAX_TOPOLOGY_SNAPSHOTS 4
+
+struct pipe_topology_line {
+ bool is_phantom_pipe;
+ int plane_idx;
+ int slice_idx;
+ int stream_idx;
+ int dpp_inst;
+ int opp_inst;
+ int tg_inst;
+};
+
+struct pipe_topology_snapshot {
+ struct pipe_topology_line pipe_log_lines[MAX_PIPES];
+ int line_count;
+ uint64_t timestamp_us;
+ int stream_count;
+ int phantom_stream_count;
+};
+
+struct pipe_topology_history {
+ struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS];
+ int current_snapshot_index;
+};
+
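The snapshot history is a small ring: MAX_TOPOLOGY_SNAPSHOTS entries with current_snapshot_index tracking the newest one. A minimal recorder sketch, assuming the convention that the index points at the most recent entry; record_topology_snapshot is a hypothetical helper, not part of this patch:

/* Illustrative only: append one snapshot, overwriting the oldest. */
static void record_topology_snapshot(struct pipe_topology_history *hist,
		const struct pipe_topology_snapshot *snap)
{
	int next = (hist->current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;

	hist->snapshots[next] = *snap;
	hist->current_snapshot_index = next; /* newest entry is now 'next' */
}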
struct gamma_curve {
uint32_t offset;
uint32_t segments_num;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 08c16ba52a51..df512920a9fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -47,6 +47,7 @@ struct encoder_init_data {
enum hpd_source_id hpd_source;
/* TODO: in DAL2, here was pointer to EventManagerInterface */
struct graphics_object_id encoder;
+ enum engine_id analog_engine;
struct dc_context *ctx;
enum transmitter transmitter;
};
@@ -83,6 +84,7 @@ struct link_encoder {
struct graphics_object_id connector;
uint32_t output_signals;
enum engine_id preferred_engine;
+ enum engine_id analog_engine;
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 42fbc70f7056..d468bc85566a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -29,7 +29,7 @@
#include "include/grph_object_id.h"
#include "dml/display_mode_structs.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 22960ee03dee..a8d1abe20f62 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -350,6 +350,15 @@ struct mpcc_state {
struct mpc_rmcm_regs rmcm_regs;
};
+struct dcn_mpc_reg_state {
+ uint32_t mpcc_bot_sel;
+ uint32_t mpcc_control;
+ uint32_t mpcc_status;
+ uint32_t mpcc_top_sel;
+ uint32_t mpcc_opp_id;
+ uint32_t mpcc_ogam_control;
+};
+
/**
* struct mpc_funcs - funcs
*/
@@ -373,6 +382,24 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
+ /**
+ * @mpc_read_reg_state:
+ *
+	 * Read the MPC register state of one MPCC instance, for underflow
+	 * debugging.
+	 *
+	 * Parameters:
+	 *
+	 * - [in] mpc - MPC context
+	 * - [in] mpcc_inst - MPCC instance to read
+	 * - [out] mpc_reg_state - MPC register state structure
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*mpc_read_reg_state)(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
/**
* @insert_plane:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 747679cb4944..e1428a83ecbc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -297,6 +297,16 @@ struct oppbuf_params {
uint32_t num_segment_padded_pixels;
};
+struct dcn_opp_reg_state {
+ uint32_t dpg_control;
+ uint32_t fmt_control;
+ uint32_t oppbuf_control;
+ uint32_t opp_pipe_control;
+ uint32_t opp_pipe_crc_control;
+ uint32_t opp_abm_control;
+ uint32_t dscrm_dsc_forward_config;
+};
+
struct opp_funcs {
@@ -368,6 +378,9 @@ struct opp_funcs {
struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding,
bool is_primary);
+
+ void (*opp_read_reg_state)(
+ struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f2de2cf23859..da7bf59c4b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -175,6 +175,135 @@ struct dcn_otg_state {
uint32_t otg_double_buffer_control;
};
+struct dcn_optc_reg_state {
+ uint32_t optc_bytes_per_pixel;
+ uint32_t optc_data_format_control;
+ uint32_t optc_data_source_select;
+ uint32_t optc_input_clock_control;
+ uint32_t optc_input_global_control;
+ uint32_t optc_input_spare_register;
+ uint32_t optc_memory_config;
+ uint32_t optc_rsmu_underflow;
+ uint32_t optc_underflow_threshold;
+ uint32_t optc_width_control;
+
+ uint32_t otg_3d_structure_control;
+ uint32_t otg_clock_control;
+ uint32_t otg_control;
+ uint32_t otg_count_control;
+ uint32_t otg_count_reset;
+ uint32_t otg_crc_cntl;
+ uint32_t otg_crc_sig_blue_control_mask;
+ uint32_t otg_crc_sig_red_green_mask;
+ uint32_t otg_crc0_data_b;
+ uint32_t otg_crc0_data_rg;
+ uint32_t otg_crc0_windowa_x_control;
+ uint32_t otg_crc0_windowa_x_control_readback;
+ uint32_t otg_crc0_windowa_y_control;
+ uint32_t otg_crc0_windowa_y_control_readback;
+ uint32_t otg_crc0_windowb_x_control;
+ uint32_t otg_crc0_windowb_x_control_readback;
+ uint32_t otg_crc0_windowb_y_control;
+ uint32_t otg_crc0_windowb_y_control_readback;
+ uint32_t otg_crc1_data_b;
+ uint32_t otg_crc1_data_rg;
+ uint32_t otg_crc1_windowa_x_control;
+ uint32_t otg_crc1_windowa_x_control_readback;
+ uint32_t otg_crc1_windowa_y_control;
+ uint32_t otg_crc1_windowa_y_control_readback;
+ uint32_t otg_crc1_windowb_x_control;
+ uint32_t otg_crc1_windowb_x_control_readback;
+ uint32_t otg_crc1_windowb_y_control;
+ uint32_t otg_crc1_windowb_y_control_readback;
+ uint32_t otg_crc2_data_b;
+ uint32_t otg_crc2_data_rg;
+ uint32_t otg_crc3_data_b;
+ uint32_t otg_crc3_data_rg;
+ uint32_t otg_dlpc_control;
+ uint32_t otg_double_buffer_control;
+ uint32_t otg_drr_control2;
+ uint32_t otg_drr_control;
+ uint32_t otg_drr_timing_int_status;
+ uint32_t otg_drr_trigger_window;
+ uint32_t otg_drr_v_total_change;
+ uint32_t otg_drr_v_total_reach_range;
+ uint32_t otg_dsc_start_position;
+ uint32_t otg_force_count_now_cntl;
+ uint32_t otg_global_control0;
+ uint32_t otg_global_control1;
+ uint32_t otg_global_control2;
+ uint32_t otg_global_control3;
+ uint32_t otg_global_control4;
+ uint32_t otg_global_sync_status;
+ uint32_t otg_gsl_control;
+ uint32_t otg_gsl_vsync_gap;
+ uint32_t otg_gsl_window_x;
+ uint32_t otg_gsl_window_y;
+ uint32_t otg_h_blank_start_end;
+ uint32_t otg_h_sync_a;
+ uint32_t otg_h_sync_a_cntl;
+ uint32_t otg_h_timing_cntl;
+ uint32_t otg_h_total;
+ uint32_t otg_interlace_control;
+ uint32_t otg_interlace_status;
+ uint32_t otg_interrupt_control;
+ uint32_t otg_long_vblank_status;
+ uint32_t otg_m_const_dto0;
+ uint32_t otg_m_const_dto1;
+ uint32_t otg_manual_force_vsync_next_line;
+ uint32_t otg_master_en;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_master_update_mode;
+ uint32_t otg_nom_vert_position;
+ uint32_t otg_pipe_update_status;
+ uint32_t otg_pixel_data_readback0;
+ uint32_t otg_pixel_data_readback1;
+ uint32_t otg_request_control;
+ uint32_t otg_snapshot_control;
+ uint32_t otg_snapshot_frame;
+ uint32_t otg_snapshot_position;
+ uint32_t otg_snapshot_status;
+ uint32_t otg_spare_register;
+ uint32_t otg_static_screen_control;
+ uint32_t otg_status;
+ uint32_t otg_status_frame_count;
+ uint32_t otg_status_hv_count;
+ uint32_t otg_status_position;
+ uint32_t otg_status_vf_count;
+ uint32_t otg_stereo_control;
+ uint32_t otg_stereo_force_next_eye;
+ uint32_t otg_stereo_status;
+ uint32_t otg_trig_manual_control;
+ uint32_t otg_triga_cntl;
+ uint32_t otg_triga_manual_trig;
+ uint32_t otg_trigb_cntl;
+ uint32_t otg_trigb_manual_trig;
+ uint32_t otg_update_lock;
+ uint32_t otg_v_blank_start_end;
+ uint32_t otg_v_count_stop_control;
+ uint32_t otg_v_count_stop_control2;
+ uint32_t otg_v_sync_a;
+ uint32_t otg_v_sync_a_cntl;
+ uint32_t otg_v_total;
+ uint32_t otg_v_total_control;
+ uint32_t otg_v_total_int_status;
+ uint32_t otg_v_total_max;
+ uint32_t otg_v_total_mid;
+ uint32_t otg_v_total_min;
+ uint32_t otg_vert_sync_control;
+ uint32_t otg_vertical_interrupt0_control;
+ uint32_t otg_vertical_interrupt0_position;
+ uint32_t otg_vertical_interrupt1_control;
+ uint32_t otg_vertical_interrupt1_position;
+ uint32_t otg_vertical_interrupt2_control;
+ uint32_t otg_vertical_interrupt2_position;
+ uint32_t otg_vready_param;
+ uint32_t otg_vstartup_param;
+ uint32_t otg_vsync_nom_int_status;
+ uint32_t otg_vupdate_keepout;
+ uint32_t otg_vupdate_param;
+};
+
/**
* struct timing_generator - Entry point to Output Timing Generator feature.
*/
@@ -381,6 +510,7 @@ struct timing_generator_funcs {
void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s);
+ void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_service.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
index 1e34e84160aa..6f94e48a24d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_service.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
@@ -292,12 +292,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint32_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 4e26a16a8743..79746d931471 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -49,6 +49,7 @@ struct resource_caps {
int num_video_plane;
int num_audio;
int num_stream_encoder;
+ int num_analog_stream_encoder;
int num_pll;
int num_dwb;
int num_ddc;
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 9e33bf937a69..1045c268672e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}
@@ -876,7 +877,7 @@ bool dp_set_test_pattern(
return false;
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -924,7 +925,7 @@ bool dp_set_test_pattern(
CRTC_STATE_VACTIVE);
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 892907991f91..befa67b2b2ae 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
return;
}
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
@@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (stream_enc->funcs->enable_stream)
stream_enc->funcs->enable_stream(stream_enc,
pipe_ctx->stream->signal, false);
- link_enc->funcs->connect_dig_be_to_fe(
- link_enc,
- pipe_ctx->stream_res.stream_enc->id,
- false);
+
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
pipe_ctx->stream->link,
@@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- if (!dc_is_virtual_signal(stream->signal))
+ if (!dc_is_virtual_signal(stream->signal) &&
+ !dc_is_rgb_signal(stream->signal))
stream_encoder->funcs->setup_stereo_sync(
stream_encoder,
pipe_ctx->stream_res.tg->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 85303167a553..6d31f4967f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service,
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
+ if (ddc_service->link->local_sink &&
+ !ddc_service->link->local_sink->edid_caps.scdc_present)
+ return;
+
link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
sizeof(sink->scdc_caps.manufacturer_OUI.byte));
@@ -858,6 +862,94 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
verify_link_capability_non_destructive(link);
}
+/**
+ * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable.
+ *
+ * Evaluates an 8-byte EDID header to decide whether it looks valid
+ * enough to indicate a connected display, without reading the
+ * full EDID.
+ *
+ * @edid_header: The first 8 bytes of the EDID read from DDC.
+ *
+ * Return: true if the header looks valid (>= 6 of 8 bytes match the
+ * expected 00/FF pattern), false otherwise.
+ */
+static bool link_detect_evaluate_edid_header(uint8_t edid_header[8])
+{
+ int edid_header_score = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
+
+ return edid_header_score >= 6;
+}
+
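For reference, a conforming EDID begins with the fixed header 00 FF FF FF FF FF FF 00, which scores 8 under the check above, so the >= 6 threshold tolerates up to two corrupted bytes. A standalone exercise of the same scoring rule; the test vectors are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same scoring rule as link_detect_evaluate_edid_header(). */
static bool edid_header_ok(const uint8_t h[8])
{
	int score = 0;
	int i;

	for (i = 0; i < 8; ++i)
		score += h[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
	return score >= 6;
}

int main(void)
{
	const uint8_t good[8]  = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	const uint8_t noisy[8] = { 0x00, 0xff, 0x7f, 0xff, 0xff, 0xfe, 0xff, 0x00 };
	const uint8_t junk[8]  = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };

	printf("%d %d %d\n", edid_header_ok(good),  /* 1: scores 8 */
	       edid_header_ok(noisy),               /* 1: scores 6 */
	       edid_header_ok(junk));               /* 0: scores 0 */
	return 0;
}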
+/**
+ * link_detect_ddc_probe() - Probe the DDC to see if a display is connected.
+ *
+ * Detects whether a display is connected via DDC without reading the
+ * full EDID: reads only the EDID header (the first 8 bytes) and
+ * evaluates whether it matches the expected pattern.
+ *
+ * @link: DC link whose DDC/I2C is probed for the EDID header.
+ *
+ * Return: true if the EDID header was read and passes validation,
+ * false otherwise.
+ */
+static bool link_detect_ddc_probe(struct dc_link *link)
+{
+	uint8_t edid_header[8] = {0};
+
+	if (!link->ddc)
+		return false;
+
+	if (!i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header)))
+		return false;
+
+	return link_detect_evaluate_edid_header(edid_header);
+}
+
+/**
+ * link_detect_dac_load_detect() - Performs DAC load detection.
+ *
+ * Load detection can be used to detect the presence of an
+ * analog display when DDC cannot be read. This causes a visible
+ * glitch, so it should be used sparingly.
+ *
+ * @link: DC link to test using the DAC load-detect path.
+ *
+ * Return: true if the VBIOS load-detect call reports OK, false
+ * otherwise.
+ */
+static bool link_detect_dac_load_detect(struct dc_link *link)
+{
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct link_encoder *link_enc = link->link_enc;
+ enum engine_id engine_id = link_enc->preferred_engine;
+ enum dal_device_type device_type = DEVICE_TYPE_CRT;
+ enum bp_result bp_result;
+ uint32_t enum_id;
+
+ switch (engine_id) {
+ case ENGINE_ID_DACB:
+ enum_id = 2;
+ break;
+ case ENGINE_ID_DACA:
+ default:
+ engine_id = ENGINE_ID_DACA;
+ enum_id = 1;
+ break;
+ }
+
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ return bp_result == BP_RESULT_OK;
+}
+
/*
* detect_link_and_local_sink() - Detect if a sink is attached to a given link
*
@@ -942,6 +1034,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
break;
}
+ case SIGNAL_TYPE_RGB: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_RGB;
+ break;
+ }
+
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
@@ -1066,7 +1164,30 @@ static bool detect_link_and_local_sink(struct dc_link *link,
DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
break;
case EDID_NO_RESPONSE:
+ /* Analog connectors without EDID:
+ * - old monitor that actually doesn't have EDID
+ * - cheap DVI-A cable or adapter that doesn't connect DDC
+ */
+ if (dc_connector_supports_analog(link->link_id.id)) {
+ /* If we didn't do DAC load detection yet, do it now
+ * to verify there really is a display connected.
+ */
+ if (link->type != dc_connection_dac_load &&
+ !link_detect_dac_load_detect(link)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+ return false;
+ }
+
+ DC_LOG_INFO("%s detected analog display without EDID\n", __func__);
+ link->type = dc_connection_dac_load;
+ sink->edid_caps.analog = true;
+ break;
+ }
+
DC_LOG_ERROR("No EDID read.\n");
+
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -1133,14 +1254,23 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
- query_hdcp_capability(sink->sink_signal, link);
+
+ if (!sink->edid_caps.analog)
+ query_hdcp_capability(sink->sink_signal, link);
}
+ /* DVI-I connector connected to analog display. */
+ if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ sink->edid_caps.analog)
+ sink->sink_signal = SIGNAL_TYPE_RGB;
+
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
else if (dc_is_dvi_signal(sink->sink_signal) &&
+ dc_is_dvi_signal(link->connector_signal) &&
aud_support->hdmi_audio_native &&
sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
@@ -1232,6 +1362,36 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
+/**
+ * link_detect_analog() - Determines if an analog sink is connected.
+ *
+ * @link: DC link to evaluate (must support analog signalling).
+ * @type: Updated with the detected connection type:
+ * dc_connection_single (analog via DDC),
+ * dc_connection_dac_load (via load-detect),
+ * or dc_connection_none.
+ *
+ * Return: always true; the detected connection type is reported via @type.
+ */
+static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type)
+{
+ /* Don't care about connectors that don't support an analog signal. */
+ ASSERT(dc_connector_supports_analog(link->link_id.id));
+
+ if (link_detect_ddc_probe(link)) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link_detect_dac_load_detect(link)) {
+ *type = dc_connection_dac_load;
+ return true;
+ }
+
+ *type = dc_connection_none;
+ return true;
+}
+
/*
* link_detect_connection_type() - Determine if there is a sink connected
*
@@ -1248,6 +1408,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
return true;
}
+ /* Ignore the HPD pin (if any) for analog connectors.
+ * Instead rely on DDC and DAC.
+ *
+ * - VGA connectors don't have any HPD at all.
+ * - Some DVI-A cables don't connect the HPD pin.
+ * - Some DVI-A cables pull up the HPD pin.
+ * (So it's high even when no display is connected.)
+ */
+ if (dc_connector_supports_analog(link->link_id.id))
+ return link_detect_analog(link, type);
+
if (link->connector_signal == SIGNAL_TYPE_EDP) {
/* in case it is not on */
if (!link->dc->config.edp_no_power_sequencing)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 83419e1a9036..6ae134147617 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -841,6 +841,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
@@ -970,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
@@ -2224,7 +2226,11 @@ static enum dc_status enable_link(
{
enum dc_status status = DC_ERROR_UNEXPECTED;
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
+ struct dc_link *link = NULL;
+
+ if (stream == NULL)
+ return DC_ERROR_UNEXPECTED;
+ link = stream->link;
/* There are some scenarios where the driver is unloaded with a display
* still enabled. When the driver is reloaded, it may cause a display
@@ -2256,6 +2262,9 @@ static enum dc_status enable_link(
enable_link_lvds(pipe_ctx);
status = DC_OK;
break;
+ case SIGNAL_TYPE_RGB:
+ status = DC_OK;
+ break;
case SIGNAL_TYPE_VIRTUAL:
status = enable_link_virtual(pipe_ctx);
break;
@@ -2458,6 +2467,7 @@ void link_set_dpms_on(
struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
bool apply_edp_fast_boot_optimization =
pipe_ctx->stream->apply_edp_fast_boot_optimization;
@@ -2502,6 +2512,8 @@ void link_set_dpms_on(
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
// Enable VPG before building infoframe
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 31a73867cd4c..a6e2b0821969 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -451,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
+static enum engine_id find_analog_engine(struct dc_link *link)
+{
+ struct dc_bios *bp = link->ctx->dc_bios;
+ struct graphics_object_id encoder = {0};
+ enum bp_result bp_result = BP_RESULT_OK;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder);
+
+ if (bp_result != BP_RESULT_OK)
+ return ENGINE_ID_UNKNOWN;
+
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENGINE_ID_DACA;
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENGINE_ID_DACB;
+ }
+ }
+
+ return ENGINE_ID_UNKNOWN;
+}
+
+static bool transmitter_supported(const enum transmitter transmitter)
+{
+ return transmitter != TRANSMITTER_UNKNOWN &&
+ transmitter != TRANSMITTER_NUTMEG_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_LCD;
+}
+
+static bool analog_engine_supported(const enum engine_id engine_id)
+{
+ return engine_id == ENGINE_ID_DACA ||
+ engine_id == ENGINE_ID_DACB;
+}
+
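Together these predicates form the early gate applied in construct_phy() below: a link whose BIOS objects name neither a supported digital transmitter nor a DAC engine is rejected before DDC and HPD are initialized. Condensed into one hypothetical helper (shown only to make the gate explicit):

static bool link_has_usable_encoder(enum transmitter t, enum engine_id dac)
{
	/* Mirrors the rejection in construct_phy(), inverted for
	 * readability: at least one encoder type must be usable.
	 */
	return transmitter_supported(t) || analog_engine_supported(dac);
}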
static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
@@ -482,10 +522,23 @@ static bool construct_phy(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ /* Determine early if the link has any supported encoders,
+ * so that we avoid initializing DDC and HPD, etc.
+ */
+ bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder);
+ enc_init_data.analog_engine = find_analog_engine(link);
+
link->ep_type = DISPLAY_ENDPOINT_PHY;
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
+ if (!transmitter_supported(enc_init_data.transmitter) &&
+ !analog_engine_supported(enc_init_data.analog_engine)) {
+ DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
+ goto unsupported_fail;
+ }
+
if (bios->funcs->get_disp_connector_caps_info) {
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
@@ -530,6 +583,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_DUAL_LINK_DVII:
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
+ case CONNECTOR_ID_VGA:
+ link->connector_signal = SIGNAL_TYPE_RGB;
+ break;
case CONNECTOR_ID_DISPLAY_PORT:
case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
@@ -611,16 +667,12 @@ static bool construct_phy(struct dc_link *link,
dal_ddc_get_line(get_ddc_pin(link->ddc));
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
link->hpd_src = enc_init_data.hpd_source;
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
@@ -735,6 +787,7 @@ static bool construct_phy(struct dc_link *link,
link->psr_settings.psr_vtotal_control_support = false;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
return true;
@@ -753,6 +806,7 @@ create_fail:
link->hpd_gpio = NULL;
}
+unsupported_fail:
DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
return false;
}
@@ -816,9 +870,7 @@ static bool construct_dpia(struct dc_link *link,
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 267180e7bc48..5d2bcce2f669 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service,
/* Below 340 Mcsc, the scramble bit comes from the SCDC caps */
if (ddc_service->link->local_sink &&
- ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite ||
+ !ddc_service->link->local_sink->edid_caps.scdc_present))
return;
link_query_ddc_data(ddc_service, slave_address, &offset,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index b12c11bd6a14..ad90a0106938 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
- if (link->fec_state == dc_link_fec_enabled)
+ if (link->dc->debug.disable_fec)
+ force_disable = true;
+ else if (link->fec_state == dc_link_fec_enabled)
force_disable = false;
else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
@@ -424,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw(
return link_rate;
}
+static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link)
+{
+ enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN;
+
+ if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX)
+ lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+ /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count > 0)
+ lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+
+ return lttpr_max_lane_count;
+}
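The helper first sanity-clamps the LTTPR-reported lane count, then lets a DPIA non-reduced (NRD) limit override it while USB4 bandwidth allocation is active. A worked example with illustrative numbers (assumed, not taken from this patch) of how the result feeds dp_get_max_link_cap():

/* The ASIC supports 4 lanes, but an active BW-allocation tunnel
 * advertises nrd_max_lane_count = 2; the tunnel limit wins, and
 * dp_get_max_link_cap() clamps lane_count from 4 down to 2.
 */
static enum dc_lane_count example_lane_clamp(void)
{
	enum dc_lane_count asic_max = LANE_COUNT_FOUR; /* from encoder caps */
	enum dc_lane_count lttpr_max = LANE_COUNT_TWO; /* NRD override */

	return lttpr_max < asic_max ? lttpr_max : asic_max; /* = 2 lanes */
}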
+
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
@@ -438,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
break;
}
+ /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_link_rate > 0)
+ lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
lttpr_max_link_rate = LINK_RATE_UHBR20;
else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
@@ -1691,7 +1713,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t read_dpcd_retry_cnt = 20;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -1734,12 +1756,13 @@ static bool retrieve_link_cap(struct dc_link *link)
}
dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ /*
+ * Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
status = core_link_read_dpcd(
link,
DP_DPCD_REV,
@@ -1845,6 +1868,12 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable));
+ /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. */
+ if (link->dpcd_caps.is_mst_capable)
+ link->wa_flags.dp_mot_reset_segment = true;
+ else
+ link->wa_flags.dp_mot_reset_segment = false;
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2063,6 +2092,11 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+ core_link_read_dpcd(link,
+ DP_PANEL_REPLAY_CAPABILITY_SUPPORT,
+ &link->dpcd_caps.pr_caps_supported.raw,
+ sizeof(link->dpcd_caps.pr_caps_supported.raw));
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
if (status != DC_OK)
@@ -2241,6 +2275,7 @@ const struct dc_link_settings *dp_get_verified_link_cap(
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+ enum dc_lane_count lttpr_max_lane_count;
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
@@ -2305,8 +2340,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
/* Some LTTPR devices do not report valid DPCD revisions; if so, do not take their link caps into consideration. */
if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_lane_count = get_lttpr_max_lane_count(link);
+
+ if (lttpr_max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = lttpr_max_lane_count;
+
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
if (lttpr_max_link_rate < max_link_cap.link_rate)
@@ -2412,6 +2450,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_detect_lt_init(link);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d",
+ __func__, link->link_index,
+ known_limit_link_setting->link_rate,
+ known_limit_link_setting->lane_count);
+
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa)
apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
@@ -2448,6 +2491,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_lt_fail_count_update(link, fail_count, true);
dp_trace_set_lt_end_timestamp(link, true);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. is_success=%d fail_count=%d",
+ __func__, link->link_index,
+ success,
+ fail_count);
+
return success;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 8a3c18ae97a7..c958d3f600c8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -225,11 +225,6 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
bool ret = false;
uint8_t val;
- if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) {
- DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index);
- return false;
- }
-
val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
@@ -273,17 +268,28 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
__func__, link->link_index);
- } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
+ }
+
+ if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
- } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ }
+
+ if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) {
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+
+ DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity);
+ }
+
+ if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
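Each tunneling status bit is now tested independently because one HPD RX IRQ can carry several of them at once; the previous else-if chain dropped every bit after the first match. The shape of the fix, as a minimal sketch (the handler names are hypothetical stand-ins for the corresponding actions in the hunk above):

void handle_bw_alloc_status(uint8_t status)
{
	if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED)
		log_request_succeeded();
	if (status & DP_TUNNELING_BW_REQUEST_FAILED)
		resend_request_with_estimated_bw();
	if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED)
		refresh_bw_granularity();	/* newly handled bit */
	if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED)
		refresh_estimated_bw();
}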
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 693477413347..4b01ab0a5a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -398,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
* Don't handle RX IRQ unless one of following is met:
* 1) The link is established (cur_link_settings != unknown)
* 2) We know we're dealing with a branch device, SST or MST
+ * 3) The link has DP tunneling BW allocation enabled.
*/
if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- is_dp_branch_device(link))
+ is_dp_branch_device(link) ||
+ link->dpia_bw_alloc_config.bw_alloc_enabled)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 5e806edbb9f6..c56e69eb27ef 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -949,7 +949,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
if (replay != NULL && link->replay_settings.replay_feature_enabled &&
- replay->funcs->replay_set_power_opt) {
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
@@ -984,7 +984,117 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
return true;
}
-bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+static bool edp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+	/* Set up VESA Panel Replay on the sink and in DMUB. */
+ struct dc *dc;
+ struct dmub_replay *replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+ unsigned int lineTimeInNs = 0;
+
+ union panel_replay_enable_and_configuration_1 pr_config_1 = { 0 };
+ union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 };
+
+ union dpcd_alpm_configuration alpm_config;
+
+ replay_context.controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ //Clear Panel Replay enable & config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
+ (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
+ (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ dc = link->ctx->dc;
+
+	/* Reuse the shared DMUB replay instance from the resource pool. */
+ replay = dc->res_pool->replay;
+
+ if (!replay)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
+ replay_context.digbe_inst = link->link_enc->transmitter;
+ replay_context.digfe_inst = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu -1 for all controller id values,
+ * therefore +1 here
+ */
+ replay_context.controllerId =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ lineTimeInNs =
+ ((stream->timing.h_total * 1000000) /
+ (stream->timing.pix_clk_100hz / 10)) + 1;
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+
+ if (link->replay_settings.replay_feature_enabled) {
+ pr_config_1.bits.PANEL_REPLAY_ENABLE = 1;
+ pr_config_1.bits.PANEL_REPLAY_CRC_ENABLE = 1;
+ pr_config_1.bits.IRQ_HPD_ASSDP_MISSING = 1;
+ pr_config_1.bits.IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR = 1;
+ pr_config_1.bits.IRQ_HPD_RFB_ERROR = 1;
+ pr_config_1.bits.IRQ_HPD_ACTIVE_FRAME_CRC_ERROR = 1;
+ pr_config_1.bits.PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE = 1;
+ pr_config_1.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1;
+
+ pr_config_2.bits.SINK_REFRESH_RATE_UNLOCK_GRANTED = 0;
+ pr_config_2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 0;
+ pr_config_2.bits.SU_REGION_SCAN_LINE_CAPTURE_INDICATION = 0;
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
+ (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
+ (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
+
+ //ALPM Setup
+ memset(&alpm_config, 0, sizeof(alpm_config));
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_RECEIVER_ALPM_CONFIG,
+ &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+
+ return true;
+}
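The line time stored in replay_context follows from the timing fields: pix_clk_100hz is the pixel clock in 100 Hz units, so pix_clk_100hz / 10 is the clock in kHz, h_total * 1000000 divided by that gives nanoseconds per line, and the + 1 rounds up. A standalone check of the arithmetic, using an illustrative 1080p60 timing (values assumed, not from this patch):

#include <stdio.h>

static unsigned int line_time_ns(unsigned int h_total,
				 unsigned int pix_clk_100hz)
{
	/* Same formula as the driver; +1 rounds up so the firmware never
	 * under-estimates the line period.
	 */
	return ((h_total * 1000000u) / (pix_clk_100hz / 10u)) + 1;
}

int main(void)
{
	/* 1080p60: h_total 2200, 148.5 MHz pixel clock = 1485000 * 100 Hz. */
	printf("%u ns\n", line_time_ns(2200, 1485000)); /* prints 14815 */
	return 0;
}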
+
+static bool edp_setup_freesync_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
/* Set up FreeSync Replay on the sink and in DMUB. */
struct dc *dc;
@@ -1080,6 +1190,18 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
return true;
}
+bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ if (!link)
+ return false;
+ if (link->replay_settings.config.replay_version == DC_VESA_PANEL_REPLAY)
+ return edp_setup_panel_replay(link, stream);
+ else if (link->replay_settings.config.replay_version == DC_FREESYNC_REPLAY)
+ return edp_setup_freesync_replay(link, stream);
+ else
+ return false;
+}
+
/*
* This is general Interface for Replay to set an 32 bit variable to dmub
* replay_FW_Message_type: Indicates which instruction or variable pass to DMUB
@@ -1110,7 +1232,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1122,9 +1244,11 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
- replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst);
+ if (coasting_vtotal && (link->replay_settings.coasting_vtotal != coasting_vtotal ||
+ link->replay_settings.frame_skip_number != frame_skip_number)) {
+ replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst, frame_skip_number);
link->replay_settings.coasting_vtotal = coasting_vtotal;
+ link->replay_settings.frame_skip_number = frame_skip_number;
}
return true;
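edp_set_coasting_vtotal() caches both values in replay_settings and only issues the DMUB call when the coasting vtotal or the new frame_skip_number actually changes. The cache-and-compare shape, reduced to a sketch with simplified types (not the driver structs):

#include <stdbool.h>
#include <stdint.h>

struct coasting_cache {
	uint32_t vtotal;
	uint16_t frame_skip;
};

static bool coasting_needs_update(struct coasting_cache *c,
				  uint32_t vtotal, uint16_t frame_skip)
{
	/* A zero vtotal means "not configured" and is never forwarded. */
	if (!vtotal || (c->vtotal == vtotal && c->frame_skip == frame_skip))
		return false;
	c->vtotal = vtotal;
	c->frame_skip = frame_skip;
	return true; /* caller sends replay_set_coasting_vtotal to DMUB */
}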
@@ -1152,7 +1276,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1163,13 +1287,16 @@ bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
/* Only when both the power opts and the coasting vtotal change can this function return true */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
- coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ (coasting_vtotal &&
+ (link->replay_settings.coasting_vtotal != coasting_vtotal ||
+ link->replay_settings.frame_skip_number != frame_skip_number))) {
if (link->replay_settings.replay_feature_enabled &&
replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
- *power_opts, panel_inst, coasting_vtotal);
+ *power_opts, panel_inst, coasting_vtotal, frame_skip_number);
link->replay_settings.replay_power_opt_active = *power_opts;
link->replay_settings.coasting_vtotal = coasting_vtotal;
+ link->replay_settings.frame_skip_number = frame_skip_number;
} else
return false;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 62a6344e613e..dd79c7cd2828 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const enum pr_residency_mode mode);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 85298b8a1b5e..6bfd2c1294e5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state(
MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
}
+void mpc3_read_reg_state(
+	struct mpc *mpc,
+	int mpcc_inst,
+	struct dcn_mpc_reg_state *mpc_reg_state)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]);
+ mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]);
+ mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]);
+ mpc_reg_state->mpcc_top_sel = REG_READ(MPCC_TOP_SEL[mpcc_inst]);
+}
+
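mpc3_read_reg_state() is a plain snapshot of the per-MPCC mux and control registers, exposed through the new mpc_read_reg_state hook so generic code can reach it. One hypothetical consumer, sketched as a debugfs-style printer (struct seq_file comes from <linux/seq_file.h>; the field names follow the reads above):

static void dump_mpcc_state(struct mpc *mpc, int inst, struct seq_file *m)
{
	struct dcn_mpc_reg_state s = { 0 };

	mpc->funcs->mpc_read_reg_state(mpc, inst, &s);
	seq_printf(m, "MPCC%d: TOP_SEL=%08x BOT_SEL=%08x OPP_ID=%08x STATUS=%08x\n",
		   inst, s.mpcc_top_sel, s.mpcc_bot_sel, s.mpcc_opp_id,
		   s.mpcc_status);
}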
static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index 103f29900a2c..e2f147d17178 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
+
void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
enum dc_lut_mode mpc3_get_ogam_current(
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index 6f0e017a8ae2..83bbbf34bcac 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -1020,6 +1020,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index e1a0308dee57..eeac13fdd6f5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -598,6 +598,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 71e9288d60ed..45d418636d0c 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
@@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
}
+
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
.dpg_is_pending = NULL,
- .opp_destroy = opp1_destroy
+ .opp_destroy = opp1_destroy,
+ .opp_read_reg_state = opp1_read_reg_state
};
void dcn10_opp_construct(struct dcn10_opp *oppn10,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
index c87de68a509e..38d0d530a9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
@@ -63,7 +63,8 @@
uint32_t OPPBUF_CONTROL1; \
uint32_t OPPBUF_3D_PARAMETERS_0; \
uint32_t OPPBUF_3D_PARAMETERS_1; \
- uint32_t OPP_PIPE_CONTROL
+ uint32_t OPP_PIPE_CONTROL; \
+ uint32_t OPP_PIPE_CRC_CONTROL
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -153,7 +154,6 @@ struct dcn10_opp {
const struct dcn10_opp_registers *regs;
const struct dcn10_opp_shift *opp_shift;
const struct dcn10_opp_mask *opp_mask;
-
bool is_write_to_ram_a_safe;
};
@@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable);
void opp1_destroy(struct output_pixel_processor **opp);
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
index f5fe0cac7cb0..ce826a5be4c7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
@@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp
return 0;
}
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
.opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count,
+ .opp_read_reg_state = opp2_read_reg_state
};
void dcn20_opp_construct(struct dcn20_opp *oppn20,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
index 34936e6c49f3..fb0c047c1788 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
@@ -59,7 +59,8 @@
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
uint32_t DPG_RAMP_CONTROL; \
- uint32_t DPG_STATUS
+ uint32_t DPG_STATUS; \
+ uint32_t DSCRM_DSC_FORWARD_CONFIG
#define OPP_DPG_MASK_SH_LIST(mask_sh) \
OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \
@@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel (
uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding, bool is_primary);
+
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
index 3542b51c9aac..e11c4e16402f 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
@@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable)
{
REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable);
}
+
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
index a9a413527801..c6cace90e8f2 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
@@ -31,7 +31,8 @@
#define OPP_REG_VARIABLE_LIST_DCN3_5 \
OPP_REG_VARIABLE_LIST_DCN2_0; \
- uint32_t OPP_TOP_CLK_CONTROL
+ uint32_t OPP_TOP_CLK_CONTROL; \
+ uint32_t OPP_ABM_CONTROL
#define OPP_MASK_SH_LIST_DCN35(mask_sh) \
OPP_MASK_SH_LIST_DCN20(mask_sh), \
@@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20,
void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable);
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 8b2a8455eb56..803bcc25601c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -209,7 +209,43 @@
uint32_t OPTC_WIDTH_CONTROL2; \
uint32_t OTG_PSTATE_REGISTER; \
uint32_t OTG_PIPE_UPDATE_STATUS; \
- uint32_t INTERRUPT_DEST
+ uint32_t INTERRUPT_DEST; \
+ uint32_t OPTC_INPUT_SPARE_REGISTER; \
+ uint32_t OPTC_RSMU_UNDERFLOW; \
+ uint32_t OPTC_UNDERFLOW_THRESHOLD; \
+ uint32_t OTG_COUNT_CONTROL; \
+ uint32_t OTG_COUNT_RESET; \
+ uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \
+ uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \
+ uint32_t OTG_DLPC_CONTROL; \
+ uint32_t OTG_DRR_CONTROL2; \
+ uint32_t OTG_DRR_TIMING_INT_STATUS; \
+ uint32_t OTG_GLOBAL_CONTROL3; \
+ uint32_t OTG_GLOBAL_SYNC_STATUS; \
+ uint32_t OTG_GSL_VSYNC_GAP; \
+ uint32_t OTG_INTERLACE_STATUS; \
+ uint32_t OTG_INTERRUPT_CONTROL; \
+ uint32_t OTG_LONG_VBLANK_STATUS; \
+ uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \
+ uint32_t OTG_MASTER_EN; \
+ uint32_t OTG_PIXEL_DATA_READBACK0; \
+ uint32_t OTG_PIXEL_DATA_READBACK1; \
+ uint32_t OTG_REQUEST_CONTROL; \
+ uint32_t OTG_SNAPSHOT_CONTROL; \
+ uint32_t OTG_SNAPSHOT_FRAME; \
+ uint32_t OTG_SNAPSHOT_POSITION; \
+ uint32_t OTG_SNAPSHOT_STATUS; \
+ uint32_t OTG_SPARE_REGISTER; \
+ uint32_t OTG_STATUS_HV_COUNT; \
+ uint32_t OTG_STATUS_VF_COUNT; \
+ uint32_t OTG_STEREO_FORCE_NEXT_EYE; \
+ uint32_t OTG_TRIG_MANUAL_CONTROL; \
+ uint32_t OTG_TRIGB_CNTL; \
+ uint32_t OTG_TRIGB_MANUAL_TRIG; \
+ uint32_t OTG_UPDATE_LOCK; \
+ uint32_t OTG_V_TOTAL_INT_STATUS; \
+ uint32_t OTG_VSYNC_NOM_INT_STATUS
+
struct dcn_optc_registers {
OPTC_REG_VARIABLE_LIST_DCN;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 4f1830ba619f..c6417538090f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -315,6 +315,136 @@ void optc31_read_otg_state(struct timing_generator *optc,
s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL);
+ optc_reg_state->optc_data_format_control = REG_READ(OPTC_DATA_FORMAT_CONTROL);
+ optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT);
+ optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL);
+ optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL);
+ optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER);
+ optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG);
+ optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW);
+ optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD);
+ optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL);
+ optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL);
+ optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL);
+ optc_reg_state->otg_control = REG_READ(OTG_CONTROL);
+ optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL);
+ optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET);
+ optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL);
+ optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK);
+ optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK);
+ optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B);
+ optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG);
+ optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B);
+ optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG);
+ optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B);
+ optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG);
+ optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B);
+ optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG);
+ optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL);
+ optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
+ optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2);
+ optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL);
+ optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS);
+ optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW);
+ optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE);
+ optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION);
+ optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL);
+ optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0);
+ optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1);
+ optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2);
+ optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3);
+ optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4);
+ optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS);
+ optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL);
+ optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP);
+ optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X);
+ optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y);
+ optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END);
+ optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A);
+ optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL);
+ optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL);
+ optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL);
+ optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL);
+ optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS);
+ optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL);
+ optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS);
+ optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0);
+ optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1);
+ optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE);
+ optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN);
+ optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE);
+ optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION);
+ optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS);
+ optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0);
+ optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1);
+ optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL);
+ optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL);
+ optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME);
+ optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION);
+ optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS);
+ optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER);
+ optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL);
+ optc_reg_state->otg_status = REG_READ(OTG_STATUS);
+ optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT);
+ optc_reg_state->otg_status_hv_count = REG_READ(OTG_STATUS_HV_COUNT);
+ optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION);
+ optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT);
+ optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL);
+ optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE);
+ optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS);
+ optc_reg_state->otg_trig_manual_control = REG_READ(OTG_TRIG_MANUAL_CONTROL);
+ optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL);
+ optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG);
+ optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL);
+ optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG);
+ optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK);
+ optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END);
+ optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL);
+ optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2);
+ optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A);
+ optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL);
+ optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL);
+ optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL);
+ optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS);
+ optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX);
+ optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID);
+ optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN);
+ optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION);
+ optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL);
+ optc_reg_state->otg_vertical_interrupt1_position = REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION);
+ optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL);
+ optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION);
+ optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM);
+ optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM);
+ optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS);
+ optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT);
+ optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM);
+}
+
static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -377,6 +507,7 @@ static const struct timing_generator_funcs dcn31_tg_funcs = {
.init_odm = optc3_init_odm,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn31_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 0f72c274f40b..98f7d2e299c5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -274,4 +274,6 @@ void optc3_init_odm(struct timing_generator *optc);
void optc31_read_otg_state(struct timing_generator *optc,
struct dcn_otg_state *s);
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 4a2caca37255..43ff957288b2 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -256,6 +256,7 @@ static const struct timing_generator_funcs dcn314_tg_funcs = {
.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn314_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index b2b226bcd871..3dcb0d0c931c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -365,6 +365,7 @@ static const struct timing_generator_funcs dcn32_tg_funcs = {
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 52d5ea98c86b..f699e95059f3 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -511,6 +511,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn35_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 5af13706e601..a8e978d1fae8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -533,6 +533,7 @@ static const struct timing_generator_funcs dcn401_tg_funcs = {
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index c4b4dc3ad8c9..d40d91ec2035 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -78,6 +78,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -225,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -368,6 +370,7 @@ static const struct dce_abm_mask abm_mask = {
#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -402,8 +406,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -484,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id], &se_shift, &se_mask);
return &enc110->base;
@@ -624,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -952,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
int i;
int j = -1;
struct dc_link *link = stream->link;
+ enum engine_id preferred_engine = link->link_enc->preferred_engine;
+
+ if (dc_is_rgb_signal(stream->signal))
+ preferred_engine = link->link_enc->analog_engine;
for (i = 0; i < pool->stream_enc_count; i++) {
if (!res_ctx->is_stream_enc_acquired[i] &&
@@ -960,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
* in daisy chain use case
*/
j = i;
- if (pool->stream_enc[i]->id ==
- link->link_enc->preferred_engine)
+ if (pool->stream_enc[i]->id == preferred_engine)
return pool->stream_enc[i];
}
}
@@ -1093,6 +1120,7 @@ static bool dce100_resource_construct(
dc->caps.disable_dp_clk_share = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index cccde5a6f3cd..cd54382c0af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -82,6 +82,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
.enable_legacy_fast_update = true,
};
@@ -1376,6 +1380,7 @@ static bool dce110_resource_construct(
dc->caps.is_apu = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 869a8e515fc0..3f0a6bc4dcc2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -76,6 +76,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -1247,6 +1251,7 @@ static bool dce112_resource_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 540e04ec1e2d..b1570b6b1af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -491,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create(
return dce_i2c_hw;
}
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX),
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX),
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
};
@@ -526,8 +527,11 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults = {
- .disable_clock_gate = true,
- .enable_legacy_fast_update = true,
+ .disable_clock_gate = true,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static struct clock_source *dce120_clock_source_create(
@@ -1089,6 +1093,7 @@ static bool dce120_resource_construct(
dc->caps.psp_setup_panel_mode = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index b75be6ad64f6..f0152933bee2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -80,6 +80,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -240,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(2),
link_regs(3),
link_regs(4),
- link_regs(5)
+ link_regs(5),
+ {0},
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -366,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -373,6 +377,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -382,6 +387,7 @@ static const struct resource_caps res_cap_61 = {
.num_timing_generator = 4,
.num_audio = 6,
.num_stream_encoder = 6,
+ .num_analog_stream_encoder = 1,
.num_pll = 3,
.num_ddc = 6,
};
@@ -389,6 +395,7 @@ static const struct resource_caps res_cap_61 = {
static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 2,
.num_pll = 3,
.num_ddc = 2,
@@ -599,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -718,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
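Worth noting about the dce60 table above: link_regs(0)..link_regs(5) occupy indices 0-5, so the zeroed pad entry keeps the DAC register set at the slot that ENGINE_ID_DACA indexes, matching the seven-DIG dce80 table below, which reaches the same slot naturally. The enum values in this sketch are illustrative, not taken from the diff:

#include <stdio.h>

enum { ENG_DIGA, ENG_DIGB, ENG_DIGC, ENG_DIGD, ENG_DIGE, ENG_DIGF,
       ENG_PAD, ENG_DACA }; /* hypothetical engine-id layout */

struct enc_regs { unsigned dac_enable; };

static const struct enc_regs link_enc_regs_sketch[] = {
	/* ENG_DIGA..ENG_DIGF: per-DIG register sets elided */
	[ENG_PAD]  = { 0 }, /* filler so ENG_DACA lands at index 7 */
	[ENG_DACA] = { .dac_enable = 0x05E0 /* stand-in for mmDAC_ENABLE */ },
};

int main(void)
{
	printf("DACA index %d, DAC_ENABLE 0x%X\n",
	       ENG_DACA, link_enc_regs_sketch[ENG_DACA].dac_enable);
	return 0;
}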
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 5b7769745202..8687104cabb7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -78,6 +78,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -241,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -368,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -383,6 +387,7 @@ static const struct resource_caps res_cap = {
static const struct resource_caps res_cap_81 = {
.num_timing_generator = 4,
.num_audio = 7,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 7,
.num_pll = 3,
.num_ddc = 6,
@@ -391,6 +396,7 @@ static const struct resource_caps res_cap_81 = {
static const struct resource_caps res_cap_83 = {
.num_timing_generator = 2,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 2,
.num_ddc = 2,
@@ -418,8 +424,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dce_dmcu_registers dmcu_regs = {
@@ -605,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -724,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -919,6 +945,7 @@ static bool dce80_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
@@ -1320,6 +1347,7 @@ static bool dce83_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.is_apu = true;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 652c05c35494..f12367adf145 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -556,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn10_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN10_DPP(*dpp));
@@ -1395,6 +1398,8 @@ static bool dcn10_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 0;
+ dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 84b38d2d6967..6679c1a14f2f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
void dcn20_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -733,7 +736,7 @@ struct dpp *dcn20_dpp_create(
uint32_t inst)
{
struct dcn20_dpp *dpp =
- kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
BREAK_TO_DEBUGGER();
@@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
@@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create(
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
}
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
- struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
- GFP_ATOMIC);
+ struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL);
if (!mpc20)
return NULL;
@@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx)
struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -962,7 +963,7 @@ static struct clock_source *dcn20_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
- kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
@@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create(
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
- kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
@@ -1668,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
@@ -2286,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
@@ -2472,6 +2474,7 @@ static bool dcn20_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2765,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool(
struct dc *dc)
{
struct dcn20_resource_pool *pool =
- kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
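The dcn20 hunks above (and the dcn201 ones below) also flip every constructor allocation from GFP_ATOMIC to GFP_KERNEL. These create paths presumably run only in sleepable process context during resource-pool construction, where GFP_KERNEL is preferable: it may block and trigger reclaim instead of failing fast from the small atomic reserves. A minimal sketch of the pattern:

#include <linux/slab.h>

/* Resource constructors run in process context at init time, so the
 * allocation is allowed to sleep. */
static void *alloc_pool_object(size_t size)
{
	return kzalloc(size, GFP_KERNEL); /* previously GFP_ATOMIC */
}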
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index e4a1338d21e0..055107843a70 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn201_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN201_DPP(*dpp));
@@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create(
uint32_t inst)
{
struct dcn201_dpp *dpp =
- kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
return NULL;
@@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn201_opp *opp =
- kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL);
if (!opp) {
return NULL;
@@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
{
- struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
- GFP_ATOMIC);
+ struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL);
if (!mpc201)
return NULL;
@@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
{
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
- kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
struct dcn10_link_encoder *enc10;
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
@@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create(
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
- kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
@@ -883,7 +884,7 @@ static const struct dce_hwseq_mask hwseq_mask = {
static struct dce_hwseq *dcn201_hwseq_create(
struct dc_context *ctx)
{
- struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
@@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create(
uint32_t inst)
{
struct dcn201_hubp *hubp201 =
- kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL);
if (!hubp201)
return NULL;
@@ -1153,6 +1154,7 @@ static bool dcn201_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
/*a0 only, remove later*/
dc->work_arounds.no_connect_phy_config = true;
@@ -1303,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool(
struct dc *dc)
{
struct dcn201_resource_pool *pool =
- kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 918742a42ded..2060acd5ae09 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1458,6 +1461,7 @@ static bool dcn21_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index ff63f59ff928..d0ebb733e802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct(
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 82a205a7c25c..3ad6a3d4858e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = false,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1498,6 +1501,7 @@ static bool dcn301_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 61623cb518d9..c0d4a1dc94f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1290,6 +1293,7 @@ static bool dcn302_resource_construct(
&is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 02b9a84f2db3..75e09c2c283e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1234,6 +1237,7 @@ static bool dcn303_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 3ed7f50554e2..0d667b54ccf8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1978,6 +1981,7 @@ static bool dcn31_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index d4917a35b991..3ccde75a4ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -924,12 +924,15 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
.disable_dsc_power_gate = true,
.min_disp_clk_khz = 100000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1910,6 +1913,7 @@ static bool dcn314_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 82cc78c291d8..4e962f522f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
.using_dml2 = false,
+ .min_disp_clk_khz = 100000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1939,6 +1943,7 @@ static bool dcn315_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 636110e48d01..5a95dd54cb42 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1815,6 +1818,7 @@ static bool dcn316_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 3965a7f1b64b..b276fec3e479 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -92,7 +92,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -738,10 +738,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dp_plus_plus_wa = true,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
- .enable_legacy_fast_update = false,
.disable_stutter_for_wm_program = true
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn32_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1844,7 +1847,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
dc_state_set_stream_cursor_subvp_limit(stream, context, true);
status = DC_FAIL_HW_CURSOR_SUPPORT;
}
- };
+ }
}
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
@@ -2197,7 +2200,8 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
- dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
+ /* floor(sqrt(buf_size_bytes / bpp) * bpp, fixed_req_size) / bpp = max_width */
+ dc->caps.max_buffered_cursor_size = 64; // floor(sqrt(16 * 1024 / 4) * 4, 256) / 4 = 64
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -2294,6 +2298,7 @@ static bool dcn32_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index ad214986f7ac..3466ca34c93f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_subvp_high_refresh = false,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
- .enable_legacy_fast_update = false,
.disable_dc_mode_overwrite = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn321_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1797,6 +1800,7 @@ static bool dcn321_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index fff57f23f4f7..ef69898d2cc5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -33,7 +33,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn35_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1946,6 +1949,7 @@ static bool dcn35_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 0abd163b425e..f3c614c4490c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -83,7 +83,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1917,6 +1920,7 @@ static bool dcn351_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index ca125ee6c2fb..6469d5fe2e6d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -11,7 +11,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn36_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -748,7 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -769,6 +768,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1918,6 +1921,7 @@ static bool dcn36_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 1d18807e4749..875ae97489d3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -73,7 +73,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -721,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
- .enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
.fams_version = {
.minor = 1,
@@ -737,6 +736,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_cositing = CHROMA_COSITING_NONE + 1,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn401_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1668,7 +1671,7 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
dc_state_set_stream_cursor_subvp_limit(stream, context, true);
status = DC_FAIL_HW_CURSOR_SUPPORT;
}
- };
+ }
}
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
@@ -1995,6 +1998,7 @@ static bool dcn401_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 0fc66487d800..e1fa2e80a15a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -227,7 +227,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
#define LE_DCN401_REG_LIST_RI(id) \
LE_DCN3_REG_LIST_RI(id), \
SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SRI_ARR(DIG_BE_CLK_CNTL, DIG, id)
+ SRI_ARR(DIG_BE_CLK_CNTL, DIG, id), \
+ SR_ARR(DIO_CLK_CNTL, id)
/* DPP */
#define DPP_REG_LIST_DCN401_COMMON_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
index 21d842857601..88c11b6be004 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -9,7 +9,7 @@
#include "dc.h"
#include "clk_mgr.h"
#include "soc_and_ip_translator.h"
-#include "dml2/dml21/inc/dml_top_soc_parameter_types.h"
+#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h"
void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index b1fb0f8a253a..7a839984dbc0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -1018,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps_c = 6;
spl_scratch->scl_data.taps.v_taps_c = 6;
}
+
+ /* Override mode: keep EASF enabled but use input taps if valid */
+ if (spl_in->override_easf) {
+ spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps;
+ spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps;
+ spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c;
+ spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c;
+
+ if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6))
+ skip_easf = true;
+ if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2))
+ spl_scratch->scl_data.taps.h_taps--;
+ if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2))
+ spl_scratch->scl_data.taps.h_taps_c--;
+ }
}
/*Ensure we can support the requested number of vtaps*/
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 23d254dea18f..20e4e52a77ac 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -545,6 +545,7 @@ struct spl_in {
enum linear_light_scaling lls_pref; // Linear Light Scaling
bool prefer_easf;
bool disable_easf;
+ bool override_easf; /* If true, keep EASF enabled but use provided in_taps */
struct spl_debug debug;
bool is_fullscreen;
bool is_hdr_on;
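The override path added to spl_get_optimal_number_of_taps above applies three rules: non-zero requested taps replace the computed ones, taps above 6 fall back to skipping EASF, and odd horizontal tap counts above 1 are rounded down to even. A standalone sketch of the same logic under assumed types (struct taps and the helper name are hypothetical):

#include <stdint.h>
#include <stdbool.h>

struct taps { uint32_t h_taps, v_taps, h_taps_c, v_taps_c; };

static bool apply_tap_override(struct taps *cur, const struct taps *req)
{
	bool skip_easf = false;

	/* non-zero requested values win */
	if (req->h_taps)   cur->h_taps   = req->h_taps;
	if (req->v_taps)   cur->v_taps   = req->v_taps;
	if (req->h_taps_c) cur->h_taps_c = req->h_taps_c;
	if (req->v_taps_c) cur->v_taps_c = req->v_taps_c;

	/* EASF supports at most 6 taps */
	if (cur->h_taps > 6 || cur->v_taps > 6)
		skip_easf = true;

	/* odd horizontal tap counts round down to even */
	if (cur->h_taps > 1 && (cur->h_taps % 2))
		cur->h_taps--;
	if (cur->h_taps_c > 1 && (cur->h_taps_c % 2))
		cur->h_taps_c--;

	return skip_easf;
}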
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 6ffc74fc9dcd..ad088d70e189 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -44,11 +44,6 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
bool is_dual_link) {}
-static void virtual_stream_encoder_lvds_set_stream_attribute(
- struct stream_encoder *enc,
- struct dc_crtc_timing *crtc_timing)
-{}
-
static void virtual_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
@@ -120,8 +115,6 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
virtual_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
virtual_stream_encoder_dvi_set_stream_attribute,
- .lvds_set_stream_attribute =
- virtual_stream_encoder_lvds_set_stream_attribute,
.set_throttled_vcp_size =
virtual_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 338fdc651f2c..9d0168986fe7 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -132,6 +132,7 @@ enum dmub_window_id {
DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_LSDMA_BUFFER,
+ DMUB_WINDOW_CURSOR_OFFLOAD,
DMUB_WINDOW_TOTAL,
};
@@ -317,6 +318,7 @@ struct dmub_srv_hw_params {
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
bool override_hbr3_pll_vco;
+ bool disable_dpia_bw_allocation;
};
/**
@@ -361,6 +363,19 @@ struct dmub_diagnostic_data {
uint8_t is_pwait : 1;
};
+/**
+ * struct dmub_preos_info - pre-OS firmware info captured before the post-OS firmware loads.
+ */
+struct dmub_preos_info {
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint64_t trace_buffer_phy_addr;
+ uint32_t trace_buffer_size;
+ uint32_t fw_version;
+ uint32_t boot_status;
+ uint32_t boot_options;
+};
+
struct dmub_srv_inbox {
/* generic status */
uint64_t num_submitted;
@@ -486,6 +501,7 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_current_time)(struct dmub_srv *dmub);
void (*get_diagnostic_data)(struct dmub_srv *dmub);
+ bool (*get_preos_fw_info)(struct dmub_srv *dmub);
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
@@ -535,7 +551,8 @@ struct dmub_srv_create_params {
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @shared_state: dmub shared state between firmware and driver
- * @fw_state: dmub firmware state pointer
+ * @cursor_offload_v1: Cursor offload state
+ * @fw_state: dmub firmware state pointer (for debug purposes only)
*/
struct dmub_srv {
enum dmub_asic asic;
@@ -544,7 +561,9 @@ struct dmub_srv {
bool is_virtual;
struct dmub_fb scratch_mem_fb;
struct dmub_fb ib_mem_gart;
+ struct dmub_fb cursor_offload_fb;
volatile struct dmub_shared_state_feature_block *shared_state;
+ volatile struct dmub_cursor_offload_v1 *cursor_offload_v1;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -583,6 +602,7 @@ struct dmub_srv {
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
struct dmub_fb lsdma_rb_fb;
+ struct dmub_preos_info preos_info;
};
/**
@@ -1068,4 +1088,14 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
*/
enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+/**
+ * dmub_srv_get_preos_info() - retrieves preos fw info
+ * @dmub: the dmub service
+ *
+ * Return:
+ * true - preos fw info retrieved successfully
+ * false - failed to retrieve preos fw info
+ */
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
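A hypothetical call site for the new helper: on success, the pre-OS firmware state is cached in dmub->preos_info by the hw-specific get_preos_fw_info hook declared above. The logging here is illustrative only:

#include "dmub/dmub_srv.h"

static void example_log_preos_state(struct dmub_srv *dmub)
{
	if (!dmub_srv_get_preos_info(dmub))
		return; /* no pre-OS firmware, or the hook is not wired up */

	pr_info("preos fw 0x%08x boot_status 0x%08x trace %u bytes\n",
		dmub->preos_info.fw_version,
		dmub->preos_info.boot_status,
		dmub->preos_info.trace_buffer_size);
}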
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 92248224b713..3f2a0ed02c59 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -485,7 +485,19 @@ union replay_debug_flags {
*/
uint32_t enable_visual_confirm_debug : 1;
- uint32_t reserved : 18;
+ /**
+ * 0x4000 (bit 14)
+ * @debug_log_enabled: Debug Log Enabled
+ */
+ uint32_t debug_log_enabled : 1;
+
+ /**
+ * 0x8000 (bit 15)
+ * @enable_sub_feature_visual_confirm: Enable Sub Feature Visual Confirm
+ */
+ uint32_t enable_sub_feature_visual_confirm : 1;
+
+ uint32_t reserved : 16;
} bitfields;
uint32_t u32All;
@@ -593,6 +605,104 @@ union replay_hw_flags {
uint32_t u32All;
};
+/**
+ * Flags that can be set by driver to change some Panel Replay behaviour.
+ */
+union pr_debug_flags {
+ struct {
+ /**
+ * 0x1 (bit 0)
+ * Enable visual confirm in FW.
+ */
+ uint32_t visual_confirm : 1;
+
+ /**
+ * 0x2 (bit 1)
+ * @skip_crc: Set if need to skip CRC.
+ */
+ uint32_t skip_crc : 1;
+
+ /**
+ * 0x4 (bit 2)
+ * @force_link_power_on: Force disable ALPM control
+ */
+ uint32_t force_link_power_on : 1;
+
+ /**
+ * 0x8 (bit 3)
+ * @force_phy_power_on: Force phy power on
+ */
+ uint32_t force_phy_power_on : 1;
+
+ /**
+ * 0x10 (bit 4)
+ * @skip_crtc_disabled: CRTC disable skipped
+ */
+ uint32_t skip_crtc_disabled : 1;
+
+ /**
+ * 0x20 (bit 5)
+ * @visual_confirm_rate_control: Enable Visual Confirm rate control detection
+ */
+ uint32_t visual_confirm_rate_control : 1;
+
+ uint32_t reserved : 26;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+union pr_hw_flags {
+ struct {
+ /**
+ * @allow_alpm_fw_standby_mode: To indicate whether the
+ * ALPM FW standby mode is allowed
+ */
+ uint32_t allow_alpm_fw_standby_mode : 1;
+
+ /**
+ * @dsc_enable_status: DSC enable status in driver
+ */
+ uint32_t dsc_enable_status : 1;
+
+ /**
+ * @fec_enable_status: receive fec enable/disable status from driver
+ */
+ uint32_t fec_enable_status : 1;
+
+ /**
+ * @smu_optimizations_en: SMU power optimization.
+ * Only when active display is Replay capable and display enters Replay.
+ * Trigger interrupt to SMU to powerup/down.
+ */
+ uint32_t smu_optimizations_en : 1;
+
+ /**
+ * @phy_power_state: Indicates current phy power state
+ */
+ uint32_t phy_power_state : 1;
+
+ /**
+ * @link_power_state: Indicates current link power state
+ */
+ uint32_t link_power_state : 1;
+ /**
+ * Use TPS3 signal when restore main link.
+ */
+ uint32_t force_wakeup_by_tps3 : 1;
+ /**
+ * @is_alpm_initialized: Indicates whether ALPM is initialized
+ */
+ uint32_t is_alpm_initialized : 1;
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
union fw_assisted_mclk_switch_version {
struct {
uint8_t minor : 5;
@@ -617,6 +727,7 @@ struct dmub_feature_caps {
uint8_t replay_supported;
uint8_t replay_reserved[3];
uint8_t abm_aux_backlight_support;
+ uint8_t lsdma_support_in_dmu;
};
struct dmub_visual_confirm_color {
@@ -629,6 +740,112 @@ struct dmub_visual_confirm_color {
uint16_t panel_inst;
};
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn30_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 16;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE : 16;
+ uint32_t reserved1[5];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn401_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16;
+ uint32_t reserved1[4];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload.
+ */
+struct dmub_cursor_offload_pipe_data_v1 {
+ union {
+ struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */
+ struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */
+ uint8_t payload[96]; /**< Guarantees the cursor pipe data size per-pipe. */
+ };
+};
+
+/**
+ * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data.
+ */
+struct dmub_cursor_offload_payload_data_v1 {
+ uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */
+ uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */
+ uint32_t pipe_mask; /**< Mask of pipes to update. */
+ uint32_t reserved; /**< Reserved for future use. */
+ struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */
+};
+
+/**
+ * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload.
+ */
+struct dmub_cursor_offload_stream_v1 {
+ struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */
+ uint32_t write_idx; /**< The index of the last written payload. */
+};
+
+/**
+ * struct dmub_cursor_offload_v1 - Cursor offload feature state.
+ */
+struct dmub_cursor_offload_v1 {
+ struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */
+};
+
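The paired write indices suggest a seqlock-style protocol: the producer bumps write_idx_start before filling pipe_data and write_idx_finish after, so a torn update is detectable by comparing the two. That consumer-side check is an inference from the field comments, not spelled out in the patch; a sketch:

#include <linux/string.h>

static bool example_read_payload(const struct dmub_cursor_offload_stream_v1 *s,
				 struct dmub_cursor_offload_payload_data_v1 *out)
{
	const struct dmub_cursor_offload_payload_data_v1 *p =
		&s->payloads[s->write_idx % 4]; /* 4 == ARRAY_SIZE(payloads) */

	memcpy(out, p, sizeof(*out));  /* snapshot first */
	/* consistent only if the producer finished its update */
	return out->write_idx_start == out->write_idx_finish;
}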
//==============================================================================
//</DMUB_TYPES>=================================================================
//==============================================================================
@@ -648,7 +865,8 @@ struct dmub_visual_confirm_color {
union dmub_fw_meta_feature_bits {
struct {
uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */
- uint32_t reserved : 31;
+ uint32_t cursor_offload_v1_support : 1; /**< 1 supports cursor offload */
+ uint32_t reserved : 30;
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -814,6 +1032,28 @@ enum dmub_ips_comand_type {
};
/**
+ * enum dmub_cursor_offload_comand_type - Cursor offload subcommands.
+ */
+enum dmub_cursor_offload_comand_type {
+ /**
+ * Initializes the cursor offload feature.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_INIT = 0,
+ /**
+ * Enables cursor offloading for a stream and updates the timing parameters.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1,
+ /**
+ * Disables cursor offloading for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2,
+ /**
+ * Programs the latest data for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3,
+};
+
+/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
union dmub_fw_boot_options {
@@ -844,7 +1084,8 @@ union dmub_fw_boot_options {
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
- uint32_t reserved : 5; /**< reserved */
+ uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */
+ uint32_t reserved : 4; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -877,6 +1118,7 @@ enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3,
+ DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
@@ -958,6 +1200,22 @@ struct dmub_shared_state_ips_driver {
}; /* 248-bytes, fixed */
/**
+ * struct dmub_shared_state_cursor_offload_stream_v1 - Per-stream metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_stream_v1 {
+ uint32_t last_write_idx; /**< Last write index */
+ uint8_t reserved[28]; /**< Reserved bytes. */
+}; /* 32-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_v1 {
+ struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */
+ uint8_t reserved[56]; /**< reserved for future use */
+}; /* 248-bytes, fixed */
+
+/**
* enum dmub_shared_state_feature_common - Generic payload.
*/
struct dmub_shared_state_feature_common {
@@ -983,6 +1241,7 @@ struct dmub_shared_state_feature_block {
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */
+ struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
@@ -1572,6 +1831,25 @@ enum dmub_cmd_type {
*/
DMUB_CMD__IPS = 91,
+ /**
+ * Command type use for Cursor offload.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD = 92,
+
+ /**
+ * Command type used for all SMART_POWER_OLED commands.
+ */
+ DMUB_CMD__SMART_POWER_OLED = 93,
+
+ /**
+ * Command type used for all Panel Replay commands.
+ */
+ DMUB_CMD__PR = 94,
+
+ /**
+ * Command type used for VBIOS shared commands.
+ */
DMUB_CMD__VBIOS = 128,
};
@@ -2369,6 +2647,7 @@ struct dmub_cmd_fams2_global_config {
union dmub_cmd_fams2_config {
struct dmub_cmd_fams2_global_config global;
+// coverity[cert_dcl37_c_violation:FALSE] errno.h, stddef.h, stdint.h not included in atombios.h
struct dmub_fams2_stream_static_state stream; //v0
union {
struct dmub_fams2_cmd_stream_static_base_state base;
@@ -3981,6 +4260,33 @@ enum replay_state {
};
/**
+ * Definition of a panel replay state
+ */
+enum pr_state {
+ PR_STATE_0 = 0x00, // State 0 steady state
+ // Pending SDP and Unlock before back to State 0
+ PR_STATE_0_PENDING_SDP_AND_UNLOCK = 0x01,
+ PR_STATE_1 = 0x10, // State 1
+ PR_STATE_2 = 0x20, // State 2 steady state
+ // Pending frame transmission before transition to State 2
+ PR_STATE_2_PENDING_FRAME_TRANSMISSION = 0x30,
+ // Active and Powered Up
+ PR_STATE_2_POWERED = 0x31,
+ // Active and Powered Down, but need to blank HUBP after DPG_EN latch
+ PR_STATE_2_PENDING_HUBP_BLANK = 0x32,
+ // Active and Pending Power Up
+ PR_STATE_2_PENDING_POWER_UP = 0x33,
+ // Active and Powered Up, Pending DPG latch
+ PR_STATE_2_PENDING_LOCK_FOR_DPG_POWER_ON = 0x34,
+ // Active and Powered Up, Pending SDP and Unlock
+ PR_STATE_2_PENDING_SDP_AND_UNLOCK = 0x35,
+ // Pending transmission of AS SDP for timing sync, but no rfb update
+ PR_STATE_2_PENDING_AS_SDP = 0x36,
+ // Invalid
+ PR_STATE_INVALID = 0xFF,
+};
+
+/**
* Replay command sub-types.
*/
enum dmub_cmd_replay_type {
@@ -4030,6 +4336,25 @@ enum dmub_cmd_replay_type {
DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
};
+/*
+ * Panel Replay sub-types
+ */
+enum dmub_cmd_panel_replay_type {
+ DMUB_CMD__PR_ENABLE = 0,
+ DMUB_CMD__PR_COPY_SETTINGS = 1,
+ DMUB_CMD__PR_UPDATE_STATE = 2,
+ DMUB_CMD__PR_GENERAL_CMD = 3,
+};
+
+enum dmub_cmd_panel_replay_state_update_subtype {
+ PR_STATE_UPDATE_COASTING_VTOTAL = 0x1,
+ PR_STATE_UPDATE_SYNC_MODE = 0x2,
+};
+
+enum dmub_cmd_panel_replay_general_subtype {
+ PR_GENERAL_CMD_DEBUG_OPTION = 0x1,
+};
+
/**
* Replay general command sub-types.
*/
@@ -4045,6 +4370,7 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS,
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
+ REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
};
struct dmub_alpm_auxless_data {
@@ -4182,17 +4508,13 @@ struct dmub_cmd_replay_set_version_data {
*/
uint8_t panel_inst;
/**
- * PSR version that FW should implement.
+ * Replay version that FW should implement.
*/
enum replay_version version;
/**
- * PSR control version.
- */
- uint8_t cmd_version;
- /**
* Explicit padding to 4 byte boundary.
*/
- uint8_t pad[2];
+ uint8_t pad[3];
};
/**
@@ -4238,6 +4560,45 @@ enum replay_enable {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_ENABLE command.
+ */
+struct dmub_rb_cmd_smart_power_oled_enable_data {
+ /**
+ * SMART_POWER_OLED enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently only panel instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+
+ uint16_t peak_nits;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ uint8_t debugcontrol;
+ /**
+ * Vertical interrupt trigger line.
+ */
+ uint32_t triggerline;
+
+ uint16_t fixed_max_cll;
+
+ uint8_t pad[2];
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command.
*/
struct dmub_rb_cmd_replay_enable_data {
@@ -4408,9 +4769,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
*/
uint16_t coasting_vtotal_high;
/**
- * Explicit padding to 4 byte boundary.
+ * Frame skip number.
*/
- uint8_t pad[2];
+ uint16_t frame_skip_number;
};
/**
@@ -4571,6 +4932,58 @@ union dmub_replay_cmd_set {
};
/**
+ * SMART POWER OLED command sub-types.
+ */
+enum dmub_cmd_smart_power_oled_type {
+
+ /**
+ * Enable/Disable SMART_POWER_OLED.
+ */
+ DMUB_CMD__SMART_POWER_OLED_ENABLE = 1,
+ /**
+ * Get current MaxCLL value if SMART POWER OLED is enabled.
+ */
+ DMUB_CMD__SMART_POWER_OLED_GETMAXCLL = 2,
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED command.
+ */
+struct dmub_rb_cmd_smart_power_oled_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_rb_cmd_smart_power_oled_enable_data data;
+};
+
+struct dmub_cmd_smart_power_oled_getmaxcll_input {
+ uint8_t panel_inst;
+ uint8_t pad[3];
+};
+
+struct dmub_cmd_smart_power_oled_getmaxcll_output {
+ uint16_t current_max_cll;
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED command.
+ */
+struct dmub_rb_cmd_smart_power_oled_getmaxcll {
+ struct dmub_cmd_header header; /**< Command header */
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */
+ union dmub_cmd_smart_power_oled_getmaxcll_data {
+ struct dmub_cmd_smart_power_oled_getmaxcll_input input; /**< Input */
+ struct dmub_cmd_smart_power_oled_getmaxcll_output output; /**< Output */
+ uint32_t output_raw; /**< Raw data output */
+ } data;
+};
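The input and output views of the union above share storage: the driver fills the input view, and once the firmware has serviced the command the same bytes are read back through the output view. A minimal usage sketch, with the submission step elided since the actual inbox path is not part of this hunk:

struct dmub_rb_cmd_smart_power_oled_getmaxcll cmd = {0};
uint16_t max_cll;

cmd.data.input.panel_inst = 0;
/* ... submit cmd to DMUB and wait for completion (path not shown) ... */
max_cll = cmd.data.output.current_max_cll;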
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -4652,6 +5065,7 @@ enum hw_lock_client {
*/
HW_LOCK_CLIENT_REPLAY = 4,
HW_LOCK_CLIENT_FAMS2 = 5,
+ HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6,
/**
* Invalid client.
*/
@@ -6064,6 +6478,257 @@ struct dmub_rb_cmd_ips_query_residency_info {
};
/**
+ * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command.
+ */
+struct dmub_cmd_cursor_offload_init_data {
+ union dmub_addr state_addr; /**< State address for dmub_cursor_offload */
+ uint32_t state_size; /**< State size for dmub_cursor_offload */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_init {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_init_data init_data;
+};
+
+/**
+ * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command.
+ */
+struct dmub_cmd_cursor_offload_stream_data {
+ uint32_t otg_inst: 4; /**< OTG instance to control */
+ uint32_t reserved: 28; /**< Reserved for future use */
+ uint32_t line_time_in_ns; /**< Line time in ns for the OTG */
+ uint32_t v_total_max; /**< OTG v_total_max */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_stream_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_stream_data data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PR_ENABLE command.
+ */
+struct dmub_cmd_pr_enable_data {
+ /**
+ * Panel Replay enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently only panel instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+ * Phy state to enter.
+ * Values to use are defined in dmub_phy_fsm_state
+ */
+ uint8_t phy_fsm_state;
+ /**
+ * Phy rate for DP - RBR/HBR/HBR2/HBR3.
+ * Set this using enum phy_link_rate.
+ * This does not support HDMI/DP2 for now.
+ */
+ uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_ENABLE command.
+ * Panel Replay enable/disable is controlled using action in data.
+ */
+struct dmub_rb_cmd_pr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_cmd_pr_enable_data data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+struct dmub_cmd_pr_copy_settings_data {
+ /**
+ * Flags that can be set by driver to change some replay behaviour.
+ */
+ union pr_debug_flags debug;
+
+ /**
+ * @flags: Flags used to determine feature functionality.
+ */
+ union pr_hw_flags flags;
+
+ /**
+ * DPP HW instance.
+ */
+ uint8_t dpp_inst;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ /**
+ * AUX HW instance.
+ */
+ uint8_t aux_inst;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently only panel instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+ * Length of each horizontal line in ns.
+ */
+ uint32_t line_time_in_ns;
+ /**
+ * PHY instance.
+ */
+ uint8_t dpphy_inst;
+ /**
+ * Determines if SMU optimizations are enabled/disabled.
+ */
+ uint8_t smu_optimizations_en;
+ /*
+ * Use FSM state for Replay power up/down
+ */
+ uint8_t use_phy_fsm;
+ /*
+ * Use FSFT-affected pixel clock
+ */
+ uint32_t pix_clk_100hz;
+ /*
+ * Use Original pixel clock
+ */
+ uint32_t sink_pix_clk_100hz;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+struct dmub_rb_cmd_pr_copy_settings {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+ struct dmub_cmd_pr_copy_settings_data data;
+};
+
+struct dmub_cmd_pr_update_state_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently only panel instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+
+ uint8_t pad[3]; // align to 4-byte boundary
+ /*
+ * Update flags to control the update behavior.
+ */
+ uint32_t update_flag;
+ /**
+ * state/data to set.
+ */
+ uint32_t coasting_vtotal;
+ uint32_t sync_mode;
+};
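A hedged usage sketch for the payload above, assuming update_flag is a bitmask built from the dmub_cmd_panel_replay_state_update_subtype values defined earlier; the patch does not show the firmware-side consumer, so the field values here are purely illustrative.

struct dmub_cmd_pr_update_state_data data = {0};

data.panel_inst = 0;
data.update_flag = PR_STATE_UPDATE_COASTING_VTOTAL | PR_STATE_UPDATE_SYNC_MODE;
data.coasting_vtotal = 2250; /* illustrative vtotal */
data.sync_mode = 1;          /* illustrative mode */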
+
+struct dmub_cmd_pr_general_cmd_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently only panel instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+ * subtype: PR general cmd sub type
+ */
+ uint8_t subtype;
+
+ uint8_t pad[2];
+ /**
+ * config data by different subtypes
+ */
+ union {
+ uint32_t u32All;
+ } data;
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_UPDATE_STATE command.
+ */
+struct dmub_rb_cmd_pr_update_state {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_UPDATE_STATE command.
+ */
+ struct dmub_cmd_pr_update_state_data data;
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_GENERAL_CMD command.
+ */
+struct dmub_rb_cmd_pr_general_cmd {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_GENERAL_CMD command.
+ */
+ struct dmub_cmd_pr_general_cmd_data data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -6392,6 +7057,38 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
+ /**
+ * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command.
+ */
+ struct dmub_rb_cmd_cursor_offload_init cursor_offload_init;
+ /**
+ * Definition of the DMUB_CMD__CURSOR_OFFLOAD control commands:
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR
+ */
+ struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_cntl;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED_ENABLE command.
+ */
+ struct dmub_rb_cmd_smart_power_oled_enable smart_power_oled_enable;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */
+ struct dmub_rb_cmd_smart_power_oled_getmaxcll smart_power_oled_getmaxcll;
+ /**
+ * Definition of a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+ struct dmub_rb_cmd_pr_copy_settings pr_copy_settings;
+ /**
+ * Definition of a DMUB_CMD__PR_ENABLE command.
+ */
+ struct dmub_rb_cmd_pr_enable pr_enable;
+
+ struct dmub_rb_cmd_pr_update_state pr_update_state;
+
+ struct dmub_rb_cmd_pr_general_cmd pr_general_cmd;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 4777c7203b2c..cd04d7c756c3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -380,6 +380,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index ce041f6239dc..7e9856289910 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -89,50 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100000;
- uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0 && is_enabled != 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- for (i = 0; i < timeout; ++i) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
- break;
-
- udelay(1);
- }
-
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
- udelay(1);
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
if (pwait_mode & (1 << 0))
break;
- udelay(1);
+ udelay(poll_delay_us);
}
- /* Force reset in case we timed out, DMCUB is likely hung. */
}
- if (is_enabled) {
+ if (enabled) {
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
udelay(1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
+ }
+
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -141,7 +149,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
- /* Clear the GPINT command manually so we don't send anything during boot. */
+ /* Clear the GPINT command manually so we don't reset again. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
}
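The rewritten reset path above reuses one loop index across both waits, so the STOP_FW acknowledgement and the PWAIT check share a single one-second budget rather than each getting a full timeout of their own. A generic sketch of that pattern, with illustrative names (udelay is the usual kernel busy-wait from linux/delay.h):

static bool poll_two_stages(bool (*stage1_done)(void), bool (*stage2_done)(void),
			    uint32_t timeout_us)
{
	uint32_t i = 0;

	for (; i < timeout_us; i++) { /* stage 1 consumes part of the budget */
		if (stage1_done())
			break;
		udelay(1);
	}
	for (; i < timeout_us; i++) { /* stage 2 gets whatever remains */
		if (stage2_done())
			break;
		udelay(1);
	}
	return i < timeout_us; /* false: at least one stage exhausted the budget */
}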
@@ -163,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub,
dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -193,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 834e5434ccb8..e13557ed97be 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -418,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -520,6 +521,45 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
+
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub)
+{
+ uint64_t region3_cw5_offset;
+ uint32_t top_addr, top_addr_enable, offset_low;
+ uint32_t offset_high, base_addr, fw_version;
+ bool is_vbios_fw = false;
+
+ memset(&dmub->preos_info, 0, sizeof(dmub->preos_info));
+
+ fw_version = REG_READ(DMCUB_SCRATCH1);
+ is_vbios_fw = ((fw_version >> 6) & 0x01) ? true : false;
+ if (!is_vbios_fw)
+ return false;
+
+ dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0);
+ dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1);
+ dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14);
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_ENABLE, &top_addr_enable);
+ if (top_addr_enable) {
+ dmub_dcn35_get_fb_base_offset(dmub,
+ &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset);
+ offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET);
+ offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH);
+ region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low;
+ dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset
+ - dmub->preos_info.fb_base + dmub->preos_info.fb_offset;
+
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr);
+ base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF;
+ dmub->preos_info.trace_buffer_size =
+ (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0;
+ }
+
+ return true;
+}
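The address math above combines the two 32-bit CW5 offset registers into one 64-bit window offset and rebases it from the firmware's framebuffer view to a CPU-usable physical address; the size follows from inclusive top/base addresses. A worked sketch with illustrative register values:

uint32_t offset_high = 0x00000001, offset_low = 0x20000000;
uint64_t cw5_offset = ((uint64_t)offset_high << 32) | offset_low; /* 0x120000000 */
uint64_t fb_base = 0x100000000ull, fb_offset = 0x0;
uint64_t trace_buffer_phy = cw5_offset - fb_base + fb_offset;     /* 0x20000000 */

uint32_t top = 0x2000FFFF, base = 0x20000000;
uint32_t size = (top > base) ? (top - base + 1) : 0;              /* 0x10000 bytes */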
+
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
/* DMCUB_REGION3_TMR_AXI_SPACE values:
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 39fcb7275da5..92e6695a2c9b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub);
void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN35_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index b31adbd0d685..95542299e3b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -81,7 +81,7 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
for (; i < timeout_us; i++) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -97,11 +97,24 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
}
}
+ if (enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
if (i >= timeout_us) {
/* timeout should never occur */
BREAK_TO_DEBUGGER();
}
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -134,7 +147,6 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
/* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -168,7 +180,6 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
/* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index b17a19400c06..a6ae1d2e9685 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -66,7 +66,7 @@
#define DMUB_SCRATCH_MEM_SIZE (1024)
/* Default indirect buffer size. */
-#define DMUB_IB_MEM_SIZE (1280)
+#define DMUB_IB_MEM_SIZE (2560)
/* Default LSDMA ring buffer size. */
#define DMUB_LSDMA_RB_SIZE (64 * 1024)
@@ -359,6 +359,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn35_get_current_time;
funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
+ funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info;
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
if (asic == DMUB_ASIC_DCN351)
@@ -564,10 +565,11 @@ enum dmub_status
window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
- window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64);
window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
+ window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64);
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
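The scratch-mem and cursor-offload windows are now rounded up to a 64-byte boundary before the region calculation. A hedged sketch of an align-up helper for a power-of-two-friendly factor; the in-tree dmub_align may differ in detail:

static inline uint32_t align_up(uint32_t val, uint32_t factor)
{
	/* e.g. align_up(1000, 64) == 1024; align_up(1024, 64) == 1024 */
	return (val + factor - 1) / factor * factor;
}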
@@ -652,21 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
- struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
- struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
+ uint32_t i;
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
- if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
- ASSERT(0);
- return DMUB_STATUS_INVALID;
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (!params->fb[i]) {
+ ASSERT(0);
+ return DMUB_STATUS_INVALID;
+ }
}
dmub->fb_base = params->fb_base;
@@ -748,9 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->shared_state = shared_state_fb->cpu_addr;
- dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM];
- dmub->ib_mem_gart = *ib_mem_gart;
+ dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD];
+ dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr;
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
@@ -1368,3 +1373,11 @@ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
+
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->hw_funcs.get_preos_fw_info)
+ return false;
+
+ return dmub->hw_funcs.get_preos_fw_info(dmub);
+}
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 812377d9e48f..973b6bdbac63 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -135,12 +135,8 @@ struct bp_external_encoder_control {
struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
- /* from GPU Tx aka asic_signal */
- enum signal_type signal;
- /* sink_signal may differ from asicSignal if Translator encoder */
enum signal_type sink_signal;
- enum display_output_bit_depth display_output_bit_depth;
- bool enable_dp_audio;
+ uint8_t bit_depth;
};
struct bp_transmitter_control {
@@ -166,6 +162,11 @@ struct bp_transmitter_control {
bool single_pll_mode;
};
+struct bp_load_detection_parameters {
+ enum engine_id engine_id;
+ uint16_t device_id;
+};
+
struct bp_hw_crtc_timing_parameters {
enum controller_id controller_id;
/* horizontal part */
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index de8f3cfed6c8..07b937b92efc 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -30,6 +30,22 @@
#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
#define DP_SINK_HW_REVISION_START 0x409
#endif
+/* Panel Replay */
+#ifndef DP_PANEL_REPLAY_CAPABILITY_SUPPORT // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_CAPABILITY_SUPPORT 0x0b0
+#endif /* DP_PANEL_REPLAY_CAPABILITY_SUPPORT */
+#ifndef DP_PANEL_REPLAY_CAPABILITY // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_CAPABILITY 0x0b1
+#endif /* DP_PANEL_REPLAY_CAPABILITY */
+#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 0x1b0
+#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 */
+#ifndef DP_PANEL_REPLAY_ENABLE // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE (1 << 0)
+#endif /* DP_PANEL_REPLAY_ENABLE */
+#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 0x1b1
+#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 */
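Under these definitions, enabling panel replay at the sink reduces to a single DPCD write. A hedged sketch using the generic DRM DP helper; the actual enable path inside DC is not part of this hunk, and the wrapper name is hypothetical:

#include <drm/display/drm_dp_helper.h>

/* aux: the link's AUX channel; returns 0 on success or a negative errno. */
static int panel_replay_sink_enable(struct drm_dp_aux *aux)
{
	ssize_t ret = drm_dp_dpcd_writeb(aux,
					 DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
					 DP_PANEL_REPLAY_ENABLE);

	return ret < 0 ? (int)ret : 0;
}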
enum dpcd_revision {
DPCD_REV_10 = 0x10,
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index cc467031651d..38a77fa9b4af 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -169,6 +169,7 @@ struct dc_firmware_info {
uint32_t engine_clk_ss_percentage;
} feature;
+ uint32_t max_pixel_clock; /* in KHz */
uint32_t default_display_engine_pll_frequency; /* in KHz */
uint32_t external_clock_source_frequency_for_dp; /* in KHz */
uint32_t smu_gpu_pll_output_freq; /* in KHz */
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 54e33062b3c0..1386fa124e85 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -310,4 +310,11 @@ static inline bool dal_graphics_object_id_equal(
}
return false;
}
+
+static inline bool dc_connector_supports_analog(const enum connector_id conn)
+{
+ return conn == CONNECTOR_ID_VGA ||
+ conn == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ conn == CONNECTOR_ID_DUAL_LINK_DVII;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index a10d6b988aab..3a2c2d2fb629 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+/**
+ * dc_is_rgb_signal() - Whether the signal is analog RGB.
+ *
+ * Returns whether the given signal type is an analog RGB signal
+ * that is used with a DAC on VGA or DVI-I connectors.
+ * Not to be confused with other uses of "RGB", such as RGB color space.
+ */
+static inline bool dc_is_rgb_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_RGB);
+}
+
static inline bool dc_is_tmds_signal(enum signal_type signal)
{
switch (signal) {
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ce421bcddcb0..1aae46d703ba 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -1260,6 +1260,17 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_v_total_for_static_ramp(
core_freesync, stream, in_out_vrr);
}
+
+ /*
+ * If VRR is inactive, set vtotal min and max to nominal vtotal
+ */
+ if (in_out_vrr->state == VRR_STATE_INACTIVE) {
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(stream,
+ in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min;
+ return;
+ }
}
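mod_freesync_calc_v_total_from_refresh pins the OTG back to the nominal total when VRR is inactive. The usual relation, hedged since the helper's exact rounding is not shown in this hunk, is v_total = pixel_clock / (h_total * refresh_rate), with the rate carried in microhertz as max_refresh_in_uhz is:

/* Illustrative: 594 MHz pixel clock, h_total 4400, 60 Hz -> v_total 2250. */
static uint32_t v_total_from_refresh(uint64_t pix_clk_hz, uint32_t h_total,
				     uint64_t refresh_uhz)
{
	return (uint32_t)(pix_clk_hz * 1000000ull / (h_total * refresh_uhz));
}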
unsigned long long mod_freesync_calc_nominal_field_rate(
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index c760216a6240..ca402ddcdacc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -354,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* reset retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* add display to connection */
@@ -400,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* remove display */
@@ -464,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* set new adjustment */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index a37634942b07..26a351a184f3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -88,6 +88,7 @@ struct mod_hdcp_transition_input_hdcp2 {
uint8_t lc_init_write;
uint8_t l_prime_available_poll;
uint8_t l_prime_read;
+ uint8_t l_prime_combo_read;
uint8_t l_prime_validation;
uint8_t eks_prepare;
uint8_t eks_write;
@@ -508,7 +509,7 @@ static inline void set_auth_complete(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
output->auth_complete = 1;
- mod_hdcp_log_ddc_trace(hdcp);
+ HDCP_AUTH_COMPLETE_TRACE(hdcp);
}
/* connection topology helpers */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 8bc377560787..1bbd728d4345 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
+ enum mod_hdcp_status status;
u8 bksv[sizeof(n)] = { };
memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
@@ -38,8 +39,14 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
count++;
n &= (n - 1);
}
- return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+
+ if (count == 20) {
+ hdcp->connection.trace.hdcp1.attempt_count++;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+ }
+ return status;
}
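HDCP 1.x requires the 40-bit BKSV to contain exactly 20 ones; the loop above clears the lowest set bit each pass (Kernighan's method), so the pass count is the population count. A standalone check of that invariant with a structurally valid test pattern:

#include <assert.h>
#include <stdint.h>

static uint8_t popcount64(uint64_t n)
{
	uint8_t count = 0;

	while (n) {
		count++;
		n &= (n - 1); /* clear the lowest set bit */
	}
	return count;
}

int main(void)
{
	/* 0xFFFFF00000: top 20 of 40 bits set -> passes the 20-ones rule. */
	assert(popcount64(0xFFFFF00000ull) == 20);
	return 0;
}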
static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
@@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp);
+
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp).
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index bb8ae80b37f8..27500abf9fee 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
if (is_dp_hdcp(hdcp))
status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
@@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
- status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
- MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi
+ & HDCP_2_2_HDMI_SUPPORT_MASK)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ trace->hdcp2.attempt_count++;
+
return status;
}
@@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
/* Avoid device count == 0 to do authentication */
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ trace->hdcp2.downstream_device_count = get_device_count(hdcp);
+ trace->hdcp2.hdcp1_device_downstream =
+ HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
+ trace->hdcp2.hdcp2_legacy_device_downstream =
+ HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
@@ -452,54 +465,11 @@ out:
return status;
}
-static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp,
- struct mod_hdcp_event_context *event_ctx,
- struct mod_hdcp_transition_input_hdcp2 *input)
-{
- enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
- &input->lc_init_write, &status,
- hdcp, "lc_init_write"))
- goto out;
- if (is_dp_hdcp(hdcp))
- msleep(16);
- else
- if (!mod_hdcp_execute_and_set(poll_l_prime_available,
- &input->l_prime_available_poll, &status,
- hdcp, "l_prime_available_poll"))
- goto out;
- if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
- &input->l_prime_read, &status,
- hdcp, "l_prime_read"))
- goto out;
-out:
- return status;
-}
-
-static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp,
- struct mod_hdcp_event_context *event_ctx,
- struct mod_hdcp_transition_input_hdcp2 *input)
-{
- enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
- &input->l_prime_read, &status,
- hdcp, "l_prime_read"))
- goto out;
-
-out:
- return status;
-}
-
static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
- && hdcp->config.ddc.funcs.atomic_write_poll_read_aux
- && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
@@ -511,9 +481,28 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
hdcp, "lc_init_prepare"))
goto out;
- status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input);
- if (status != MOD_HDCP_STATUS_SUCCESS)
- goto out;
+ if (hdcp->connection.link.adjust.hdcp2.use_fw_locality_check) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_combo_read, &status,
+ hdcp, "l_prime_combo_read"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
+ &input->lc_init_write, &status,
+ hdcp, "lc_init_write"))
+ goto out;
+ if (is_dp_hdcp(hdcp))
+ msleep(16);
+ else
+ if (!mod_hdcp_execute_and_set(poll_l_prime_available,
+ &input->l_prime_available_poll, &status,
+ hdcp, "l_prime_available_poll"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+ }
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 89ffb89e1932..9316312a4df5 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -184,31 +184,33 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
- case H2_A2_LOCALITY_CHECK: {
- const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
- && !adjust->hdcp2.force_sw_locality_check;
-
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ case H2_A2_LOCALITY_CHECK:
+ /* 1A-05: consider disconnection after LC init a failure */
if (hdcp->state.stay_count > 10 ||
- input->lc_init_prepare != PASS ||
- (!use_fw && input->lc_init_write != PASS) ||
- (!use_fw && input->l_prime_available_poll != PASS)) {
+ input->lc_init_prepare != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
- } else if (input->l_prime_read != PASS) {
- if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
- adjust->hdcp2.force_sw_locality_check = true;
+ } else if (adjust->hdcp2.use_fw_locality_check &&
+ input->l_prime_combo_read != PASS) {
+ /* 1A-13-2: consider l' timeout a failure */
+ if (adjust->hdcp2.use_sw_locality_fallback) {
+ /* switch to software locality check */
+ adjust->hdcp2.use_fw_locality_check = 0;
callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
break;
}
-
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!adjust->hdcp2.use_fw_locality_check &&
+ (input->lc_init_write != PASS ||
+ input->l_prime_available_poll != PASS ||
+ input->l_prime_read != PASS)) {
+ /* 1A-13-2: consider l' timeout a failure */
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
+ /* 1A-13-1: consider invalid l' a failure */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
@@ -216,7 +218,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
- }
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
@@ -510,26 +511,29 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
- case D2_A2_LOCALITY_CHECK: {
- const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux
- && !adjust->hdcp2.force_sw_locality_check;
-
+ case D2_A2_LOCALITY_CHECK:
if (hdcp->state.stay_count > 10 ||
- input->lc_init_prepare != PASS ||
- (!use_fw && input->lc_init_write != PASS)) {
- /* 1A-12: consider invalid l' a failure */
+ input->lc_init_prepare != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
- } else if (input->l_prime_read != PASS) {
- if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
- adjust->hdcp2.force_sw_locality_check = true;
+ } else if (adjust->hdcp2.use_fw_locality_check &&
+ input->l_prime_combo_read != PASS) {
+ if (adjust->hdcp2.use_sw_locality_fallback) {
+ /* switch to software locality check */
+ adjust->hdcp2.use_fw_locality_check = 0;
callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
break;
}
-
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!adjust->hdcp2.use_fw_locality_check &&
+ (input->lc_init_write != PASS ||
+ input->l_prime_read != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
+ /* 1A-12: consider invalid l' a failure */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
@@ -537,7 +541,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
- }
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 2e6408579194..0ca39873f807 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -758,6 +758,6 @@ enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
{
const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
- return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 6b3b5f610907..5cb979c2cf8c 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -125,129 +125,11 @@ void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
}
}
+#define CASE_FORMAT(entry) case entry: return #entry;
char *mod_hdcp_status_to_str(int32_t status)
{
switch (status) {
- case MOD_HDCP_STATUS_SUCCESS:
- return "MOD_HDCP_STATUS_SUCCESS";
- case MOD_HDCP_STATUS_FAILURE:
- return "MOD_HDCP_STATUS_FAILURE";
- case MOD_HDCP_STATUS_RESET_NEEDED:
- return "MOD_HDCP_STATUS_RESET_NEEDED";
- case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
- return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
- case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
- return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
- case MOD_HDCP_STATUS_INVALID_STATE:
- return "MOD_HDCP_STATUS_INVALID_STATE";
- case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
- return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
- case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
- return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
- case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
- return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
- case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
- return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
- case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
- return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
- return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
- case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
- return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
- case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
- return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
- return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
- return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
- return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
- case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
- return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
- case MOD_HDCP_STATUS_DDC_FAILURE:
- return "MOD_HDCP_STATUS_DDC_FAILURE";
- case MOD_HDCP_STATUS_INVALID_OPERATION:
- return "MOD_HDCP_STATUS_INVALID_OPERATION";
- case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
- return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
- case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
- case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
- return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
- return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
- return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
- return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
- case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
- case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE:
- return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE";
+ MOD_HDCP_STATUS_LIST(CASE_FORMAT)
default:
return "MOD_HDCP_STATUS_UNKNOWN";
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 1d83c1b9da10..26553aa4c5ca 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -31,6 +31,7 @@
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#define HDCP_LOG_TRA(hdcp) do {} while (0)
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
@@ -131,4 +132,9 @@
HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
} while (0)
+#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \
+ mod_hdcp_log_ddc_trace(hdcp); \
+ HDCP_LOG_TRA(hdcp); \
+} while (0)
+
#endif // MOD_HDCP_LOG_H_
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index b51ddf2846df..835467225458 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -35,69 +35,74 @@ struct mod_hdcp;
#define MAX_NUM_OF_DISPLAYS 6
#define MAX_NUM_OF_ATTEMPTS 4
#define MAX_NUM_OF_ERROR_TRACE 10
+#define MOD_HDCP_STATUS_LIST(FORMAT) \
+ FORMAT(MOD_HDCP_STATUS_SUCCESS) \
+ FORMAT(MOD_HDCP_STATUS_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_RESET_NEEDED) \
+ FORMAT(MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND) \
+ FORMAT(MOD_HDCP_STATUS_DISPLAY_NOT_FOUND) \
+ FORMAT(MOD_HDCP_STATUS_INVALID_STATE) \
+ FORMAT(MOD_HDCP_STATUS_NOT_IMPLEMENTED) \
+ FORMAT(MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_INVALID_BKSV) \
+ FORMAT(MOD_HDCP_STATUS_DDC_FAILURE) /* TODO: specific errors */ \
+ FORMAT(MOD_HDCP_STATUS_INVALID_OPERATION) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE)
+
+#define ENUM_FORMAT(entry) entry,
/* detailed return status */
enum mod_hdcp_status {
- MOD_HDCP_STATUS_SUCCESS = 0,
- MOD_HDCP_STATUS_FAILURE,
- MOD_HDCP_STATUS_RESET_NEEDED,
- MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
- MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
- MOD_HDCP_STATUS_INVALID_STATE,
- MOD_HDCP_STATUS_NOT_IMPLEMENTED,
- MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
- MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
- MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
- MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
- MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
- MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
- MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
- MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
- MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
- MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
- MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
- MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
- MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
- MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
- MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
- MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
- MOD_HDCP_STATUS_INVALID_OPERATION,
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
- MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
- MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
- MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
- MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
- MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
- MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
- MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE,
+ MOD_HDCP_STATUS_LIST(ENUM_FORMAT)
};
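This is the X-macro idiom: one list expands into both the enum and its string table, so the two can never drift apart the way the hand-written switch above could. A minimal self-contained illustration of the pattern, with names that are hypothetical rather than part of the patch:

#define COLOR_LIST(F) F(COLOR_RED) F(COLOR_GREEN) F(COLOR_BLUE)
#define AS_ENUM(entry) entry,
#define AS_CASE(entry) case entry: return #entry;

enum color { COLOR_LIST(AS_ENUM) };

static const char *color_to_str(enum color c)
{
	switch (c) {
	COLOR_LIST(AS_CASE)
	default:
		return "COLOR_UNKNOWN";
	}
}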
struct mod_hdcp_displayport {
@@ -214,8 +219,9 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t force_sw_locality_check : 1;
- uint8_t reserved : 2;
+ uint8_t use_fw_locality_check : 1;
+ uint8_t use_sw_locality_fallback: 1;
+ uint8_t reserved : 1;
};
struct mod_hdcp_link_adjustment {
@@ -230,9 +236,23 @@ struct mod_hdcp_error {
uint8_t state_id;
};
+struct mod_hdcp1_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+};
+
+struct mod_hdcp2_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+ uint8_t hdcp1_device_downstream;
+ uint8_t hdcp2_legacy_device_downstream;
+};
+
struct mod_hdcp_trace {
struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
uint8_t error_count;
+ struct mod_hdcp1_trace hdcp1;
+ struct mod_hdcp2_trace hdcp2;
};
enum mod_hdcp_encryption_status {
@@ -303,10 +323,6 @@ struct mod_hdcp_display_query {
struct mod_hdcp_config {
struct mod_hdcp_psp psp;
struct mod_hdcp_ddc ddc;
- struct {
- uint8_t lc_enable_sw_fallback : 1;
- uint8_t reserved : 7;
- } debug;
uint8_t index;
};
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 29ccd3532d13..fd139b219bf9 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,6 +975,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+void set_replay_frame_skip_number(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t coasting_vtotal_refresh_rate_mhz,
+ uint32_t flicker_free_refresh_rate_mhz,
+ bool is_defer)
+{
+ uint32_t *frame_skip_number_array = NULL;
+ uint32_t frame_skip_number = 0;
+
+ if (link == NULL || flicker_free_refresh_rate_mhz == 0 || coasting_vtotal_refresh_rate_mhz == 0)
+ return;
+
+ if (is_defer)
+ frame_skip_number_array = link->replay_settings.defer_frame_skip_number_table;
+ else
+ frame_skip_number_array = link->replay_settings.frame_skip_number_table;
+
+ if (frame_skip_number_array == NULL)
+ return;
+
+ frame_skip_number = coasting_vtotal_refresh_rate_mhz / flicker_free_refresh_rate_mhz;
+
+ if (frame_skip_number >= 1)
+ frame_skip_number_array[type] = frame_skip_number - 1;
+ else
+ frame_skip_number_array[type] = 0;
+}
+
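A worked example of the division above, assuming the _mhz suffix means millihertz: with a coasting refresh rate of 120 Hz (120000) and a flicker-free floor of 30 Hz (30000), the ratio is 4, so the stored entry becomes 3 — render one frame, skip the next three. A hedged call sketch (the coasting-type constant is assumed):

```c
/* Hypothetical caller; PR_COASTING_TYPE_STATIC is assumed to be a valid
 * enum replay_coasting_vtotal_type value.
 */
set_replay_frame_skip_number(link, PR_COASTING_TYPE_STATIC,
			     120000, /* 120 Hz coasting rate, in mHz */
			     30000,  /* 30 Hz flicker-free rate, in mHz */
			     false); /* update the live table, not the deferred one */
/* frame_skip_number_table[PR_COASTING_TYPE_STATIC] = 120000 / 30000 - 1 = 3 */
```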
void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal)
@@ -987,6 +1015,8 @@ void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
{
link->replay_settings.coasting_vtotal_table[type] =
link->replay_settings.defer_update_coasting_vtotal_table[type];
+ link->replay_settings.frame_skip_number_table[type] =
+ link->replay_settings.defer_frame_skip_number_table[type];
}
void set_replay_coasting_vtotal(struct dc_link *link,
@@ -1007,6 +1037,9 @@ void calculate_replay_link_off_frame_count(struct dc_link *link,
uint8_t max_link_off_frame_count = 0;
uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
+ if (!link || link->replay_settings.config.replay_version != DC_FREESYNC_REPLAY)
+ return;
+
max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 391209a3bf29..87d31d9dce5a 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -60,6 +60,11 @@ void set_replay_coasting_vtotal(struct dc_link *link,
void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal);
+void set_replay_frame_skip_number(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+				  uint32_t coasting_vtotal_refresh_rate_mhz,
+				  uint32_t flicker_free_refresh_rate_mhz,
+ bool is_defer);
void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
enum replay_coasting_vtotal_type type);
void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
index 086869264425..a252ee4c7874 100644
--- a/drivers/gpu/drm/amd/include/amd_cper.h
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 75efda2969cf..17945094a138 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,7 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_VPE,
AMD_IP_BLOCK_TYPE_UMSCH_MM,
AMD_IP_BLOCK_TYPE_ISP,
+ AMD_IP_BLOCK_TYPE_RAS,
AMD_IP_BLOCK_TYPE_NUM,
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
index 2176548e9203..9778822dd2a0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
@@ -60,5 +60,10 @@
#define mmVCE_VCPU_CACHE_SIZE1 0x800C
#define mmVCE_VCPU_CACHE_SIZE2 0x800E
#define mmVCE_VCPU_CNTL 0x8005
+#define mmVCE_VCPU_SCRATCH7 0x8037
+#define mmVCE_FW_REG_STATUS 0x8384
+#define mmVCE_LMI_FW_PERIODIC_CTRL 0x8388
+#define mmVCE_LMI_FW_START_KEYSEL 0x8386
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
index ea5b26b11cb1..1f82d6f5abde 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
@@ -61,6 +61,8 @@
#define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004
#define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L
#define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000
+#define VCE_SOFT_RESET__FME_SOFT_RESET_MASK 0x00000004L
+#define VCE_SOFT_RESET__FME_SOFT_RESET__SHIFT 0x00000002
#define VCE_STATUS__JOB_BUSY_MASK 0x00000001L
#define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000
#define VCE_STATUS__UENC_BUSY_MASK 0x00000100L
@@ -95,5 +97,13 @@
#define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012
+#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK 0x00010000L
+#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_SHIFT 0x00000010
+#define VCE_FW_REG_STATUS__BUSY_MASK 0x00000001L
+#define VCE_FW_REG_STATUS__BUSY__SHIFT 0x00000000
+#define VCE_FW_REG_STATUS__PASS_MASK 0x00000008L
+#define VCE_FW_REG_STATUS__PASS__SHIFT 0x00000003
+#define VCE_FW_REG_STATUS__DONE_MASK 0x00000800L
+#define VCE_FW_REG_STATUS__DONE__SHIFT 0x0000000b
#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
index 64b553e7de1a..e7fdcee22a71 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 2b0cdb2a2775..2366e68262e6 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -454,7 +454,7 @@ struct amd_pm_funcs {
bool gate,
int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
- int (*set_power_limit)(void *handle, uint32_t n);
+ int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
@@ -532,6 +532,110 @@ struct metrics_table_header {
uint8_t content_revision;
};
+enum amdgpu_metrics_attr_id {
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC,
+ AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH,
+ AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST,
+ AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK,
+ AMDGPU_METRICS_ATTR_ID_NUM_PARTITION,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST,
+ AMDGPU_METRICS_ATTR_ID_JPEG_BUSY,
+ AMDGPU_METRICS_ATTR_ID_VCN_BUSY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_MAX,
+};
+
+enum amdgpu_metrics_attr_type {
+ AMDGPU_METRICS_TYPE_U8,
+ AMDGPU_METRICS_TYPE_S8,
+ AMDGPU_METRICS_TYPE_U16,
+ AMDGPU_METRICS_TYPE_S16,
+ AMDGPU_METRICS_TYPE_U32,
+ AMDGPU_METRICS_TYPE_S32,
+ AMDGPU_METRICS_TYPE_U64,
+ AMDGPU_METRICS_TYPE_S64,
+ AMDGPU_METRICS_TYPE_MAX,
+};
+
+enum amdgpu_metrics_attr_unit {
+ /* None */
+ AMDGPU_METRICS_UNIT_NONE,
+	/* MHz */
+	AMDGPU_METRICS_UNIT_CLOCK_1,
+	/* Degree Celsius */
+	AMDGPU_METRICS_UNIT_TEMP_1,
+	/* Watts */
+	AMDGPU_METRICS_UNIT_POWER_1,
+	/* In nanoseconds */
+	AMDGPU_METRICS_UNIT_TIME_1,
+	/* In 10 nanoseconds */
+	AMDGPU_METRICS_UNIT_TIME_2,
+ /* Speed in GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_1,
+ /* Speed in 0.1 GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_2,
+ /* Bandwidth GB/s */
+ AMDGPU_METRICS_UNIT_BW_1,
+ /* Data in KB */
+ AMDGPU_METRICS_UNIT_DATA_1,
+ /* Percentage */
+ AMDGPU_METRICS_UNIT_PERCENT,
+ AMDGPU_METRICS_UNIT_MAX,
+};
+
+#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000
+#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24
+#define AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000
+#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20
+#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00
+#define AMDGPU_METRICS_ATTR_ID_SHIFT 10
+#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF
+#define AMDGPU_METRICS_ATTR_INST_SHIFT 0
+
+#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \
+ (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \
+ ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \
+ ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst))
+
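To make the packing above concrete, here is a hedged sketch of encoding one attribute and unpacking a field back out; the unit/type pairing chosen for the hotspot temperature is an assumption:

```c
/* Illustrative only: pairing TEMP_1/U16 with the hotspot ID is a guess. */
static u64 demo_encode_hotspot(void)
{
	return AMDGPU_METRICS_ENC_ATTR(AMDGPU_METRICS_UNIT_TEMP_1,
				       AMDGPU_METRICS_TYPE_U16,
				       AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT,
				       0 /* instance */);
}

static u32 demo_decode_id(u64 enc)
{
	/* Each field comes back out with its mask/shift pair. */
	return (enc & AMDGPU_METRICS_ATTR_ID_MASK) >> AMDGPU_METRICS_ATTR_ID_SHIFT;
}
```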
/*
* gpu_metrics_v1_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v1_1 or later instead.
@@ -1221,6 +1325,19 @@ struct gpu_metrics_v1_8 {
uint32_t pcie_lc_perf_other_end_recovery;
};
+struct gpu_metrics_attr {
+ /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */
+ uint64_t attr_encoding;
+ /* Attribute value, depends on attr_encoding */
+ void *attr_value;
+};
+
+struct gpu_metrics_v1_9 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
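Given that layout, a consumer would presumably walk metrics_attrs and dispatch on the decoded ID; a sketch under that assumption:

```c
/* Hypothetical reader; assumes attr_value points at storage whose width
 * matches the encoded AMDGPU_METRICS_TYPE_* field.
 */
static void demo_walk_metrics(const struct gpu_metrics_v1_9 *m)
{
	int i;

	for (i = 0; i < m->attr_count; i++) {
		const struct gpu_metrics_attr *a = &m->metrics_attrs[i];
		u32 id = (a->attr_encoding & AMDGPU_METRICS_ATTR_ID_MASK) >>
			 AMDGPU_METRICS_ATTR_ID_SHIFT;

		if (id == AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT)
			pr_info("hotspot: %u\n", *(const u16 *)a->attr_value);
	}
}
```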
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
@@ -1703,4 +1820,10 @@ struct amdgpu_partition_metrics_v1_0 {
uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
};
+struct amdgpu_partition_metrics_v1_1 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index ab1cfc92dbeb..f9629d42ada2 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -345,7 +345,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 69611c7e30e3..2f12cba4eb66 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -399,7 +399,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 518d07afc7df..79b174e5326d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -195,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
-{
- int ret = 0;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (pp_funcs && pp_funcs->notify_rlc_state) {
- mutex_lock(&adev->pm.mutex);
-
- ret = pp_funcs->notify_rlc_state(
- adev->powerplay.pp_handle,
- en);
-
- mutex_unlock(&adev->pm.mutex);
- }
-
- return ret;
-}
-
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1205,8 +1187,11 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (!pp_funcs->get_pp_table)
- return 0;
+ if (!table)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
@@ -1616,6 +1601,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
}
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit_type,
uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1626,7 +1612,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
- limit);
+ limit_type, limit);
mutex_unlock(&adev->pm.mutex);
return ret;
@@ -1732,7 +1718,10 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (!pp_funcs->set_pp_table)
+ if (!buf || !size)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
@@ -2139,3 +2128,10 @@ ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
return ret;
}
+
+const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ return smu_get_ras_smu_driver(pp_handle);
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b5fbb0fd1dc0..65296a819e6a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -108,8 +108,9 @@ const char * const amdgpu_pp_profile_name[] = {
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
bool runpm_check = runpm ? adev->in_runpm : false;
+ bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);
- if (amdgpu_in_reset(adev))
+ if (amdgpu_in_reset(adev) || !full_init)
return -EBUSY;
if (adev->in_suspend && !runpm_check)
@@ -173,7 +174,6 @@ static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
*/
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
- pm_runtime_mark_last_busy(adev->dev);
pm_runtime_put_autosuspend(adev->dev);
}
@@ -2506,7 +2506,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
.attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
@@ -2638,6 +2638,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
-EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_table)) {
+ int ret;
+ char *tmp = NULL;
+
+ ret = amdgpu_dpm_get_pp_table(adev, &tmp);
+ if (ret == -EOPNOTSUPP || !tmp)
+ *states = ATTR_STATE_UNSUPPORTED;
+ else
+ *states = ATTR_STATE_SUPPORTED;
}
switch (gc_ver) {
@@ -3372,7 +3381,9 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
"fastPPT" : "slowPPT");
else
- return sysfs_emit(buf, "PPT\n");
+ return sysfs_emit(buf, "%s\n",
+ to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
+ "PPT1" : "PPT");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
@@ -3390,13 +3401,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
- value |= limit_type << 24;
err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
- err = amdgpu_dpm_set_power_limit(adev, value);
+ err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
amdgpu_pm_put_access(adev);
@@ -3578,7 +3588,6 @@ static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_m
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
@@ -3627,7 +3636,6 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap.dev_attr.attr,
&sensor_dev_attr_power1_cap_default.dev_attr.attr,
&sensor_dev_attr_power1_label.dev_attr.attr,
- &sensor_dev_attr_power2_average.dev_attr.attr,
&sensor_dev_attr_power2_cap_max.dev_attr.attr,
&sensor_dev_attr_power2_cap_min.dev_attr.attr,
&sensor_dev_attr_power2_cap.dev_attr.attr,
@@ -3826,13 +3834,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* only Vangogh has fast PPT limit and power labels */
- if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
- (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+ if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
+ (amdgpu_dpm_get_power_limit(adev, &tmp,
+ PP_PWR_LIMIT_MAX,
+ PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
return 0;
return effective_mode;
@@ -4724,14 +4733,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = devm_device_add_group(adev->dev,
&amdgpu_pm_policy_attr_group);
if (ret)
- goto err_out0;
+ goto err_out1;
}
if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
ret = devm_device_add_group(adev->dev,
&amdgpu_board_attr_group);
if (ret)
- goto err_out0;
+ goto err_out1;
if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
(void *)&tmp) != -EOPNOTSUPP) {
sysfs_add_file_to_group(&adev->dev->kobj,
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 65c1d98af26c..aa3f427819a0 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -424,8 +424,6 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
-
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
@@ -553,7 +551,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
- uint32_t limit);
+ uint32_t limit_type, uint32_t limit);
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m);
@@ -614,5 +612,6 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
enum smu_temp_metric_type type);
+const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index cf9932e68055..1f539cc65f41 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -2558,18 +2558,13 @@ static int si_enable_power_containment(struct amdgpu_device *adev,
if (enable) {
if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
- if (smc_result != PPSMC_Result_OK) {
+ if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
- ni_pi->pc_enabled = false;
- } else {
- ni_pi->pc_enabled = true;
- }
}
} else {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
- ni_pi->pc_enabled = false;
}
}
@@ -3500,6 +3495,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
* for these GPUs to calculate bandwidth requirements.
*/
if (high_pixelclock_count) {
+ /* Work around flickering lines at the bottom edge
+ * of the screen when using a single 4K 60Hz monitor.
+ */
+ disable_mclk_switching = true;
+
/* On Oland, we observe some flickering when two 4K 60Hz
* displays are connected, possibly because voltage is too low.
* Raise the voltage by requiring a higher SCLK.
@@ -7046,13 +7046,20 @@ static void si_set_vce_clock(struct amdgpu_device *adev,
if ((old_rps->evclk != new_rps->evclk) ||
(old_rps->ecclk != new_rps->ecclk)) {
/* Turn the clocks on when encoding, off otherwise */
+		dev_dbg(adev->dev, "setting VCE clocks: evclk %u, ecclk %u\n", new_rps->evclk, new_rps->ecclk);
+
if (new_rps->evclk || new_rps->ecclk) {
- /* Place holder for future VCE1.0 porting to amdgpu
- vce_v1_0_enable_mgcg(adev, false, false);*/
+ amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);
+ amdgpu_device_ip_set_clockgating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE);
} else {
- /* Place holder for future VCE1.0 porting to amdgpu
- vce_v1_0_enable_mgcg(adev, true, false);
- amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/
+ amdgpu_device_ip_set_powergating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE);
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
}
}
}
@@ -7504,8 +7511,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
pi->pasi = CYPRESS_HASI_DFLT;
pi->vrc = SISLANDS_VRC_DFLT;
- pi->gfx_clock_gating = true;
-
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
@@ -7516,7 +7521,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
eg_pi->dynamic_ac_timing = true;
- eg_pi->light_sleep = true;
#if defined(CONFIG_ACPI)
eg_pi->pcie_performance_request =
amdgpu_acpi_is_pcie_performance_request_supported(adev);
@@ -7577,6 +7581,7 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
} else {
pl = &ps->performance_levels[current_index];
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ seq_printf(m, "vce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
index 11cb7874a6bb..3aed75fbf913 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
@@ -38,11 +38,7 @@
#define MC_ARB_DRAM_TIMING2_2 0xa00
#define MC_ARB_DRAM_TIMING2_3 0xa01
-#define MAX_NO_OF_MVDD_VALUES 2
-#define MAX_NO_VREG_STEPS 32
#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
#define RV770_ASI_DFLT 1000
#define CYPRESS_HASI_DFLT 400000
#define PCIE_PERF_REQ_PECI_GEN1 2
@@ -51,11 +47,6 @@
#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
-#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-
-#define RV770_SMC_TABLE_ADDRESS 0xB000
-#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-
#define SMC_STROBE_RATIO 0x0F
#define SMC_STROBE_ENABLE 0x10
@@ -64,27 +55,6 @@
#define SMC_MC_RTT_ENABLE 0x04
#define SMC_MC_STUTTER_EN 0x08
-#define RV770_SMC_VOLTAGEMASK_VDDC 0
-#define RV770_SMC_VOLTAGEMASK_MVDD 1
-#define RV770_SMC_VOLTAGEMASK_VDDCI 2
-#define RV770_SMC_VOLTAGEMASK_MAX 4
-
-#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define NISLANDS_SMC_STROBE_RATIO 0x0F
-#define NISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define NISLANDS_SMC_MC_STUTTER_EN 0x08
-
-#define MAX_NO_VREG_STEPS 32
-
-#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-
#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
@@ -219,32 +189,6 @@ enum si_cac_config_reg_type
SISLANDS_CACCONFIG_MAX
};
-enum si_power_level {
- SI_POWER_LEVEL_LOW = 0,
- SI_POWER_LEVEL_MEDIUM = 1,
- SI_POWER_LEVEL_HIGH = 2,
- SI_POWER_LEVEL_CTXSW = 3,
-};
-
-enum si_td {
- SI_TD_AUTO,
- SI_TD_UP,
- SI_TD_DOWN,
-};
-
-enum si_display_watermark {
- SI_DISPLAY_WATERMARK_LOW = 0,
- SI_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum si_display_gap
-{
- SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- SI_PM_DISPLAY_GAP_VBLANK = 1,
- SI_PM_DISPLAY_GAP_WATERMARK = 2,
- SI_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
extern const struct amdgpu_ip_block_version si_smu_ip_block;
struct ni_leakage_coeffients
@@ -258,56 +202,6 @@ struct ni_leakage_coeffients
u32 t_ref;
};
-struct SMC_Evergreen_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-
-struct evergreen_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct evergreen_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_Evergreen_MCRegisterSet
-{
- uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-
-struct SMC_Evergreen_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
- SMC_Evergreen_MCRegisterSet data[5];
-};
-
-typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
-
-struct SMC_NIslands_MCRegisterSet
-{
- uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-
-struct ni_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
struct SMC_NIslands_MCRegisterAddress
{
uint16_t s0;
@@ -316,257 +210,20 @@ struct SMC_NIslands_MCRegisterAddress
typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-struct SMC_NIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-
-struct evergreen_ulv_param {
- bool supported;
- struct rv7xx_pl *pl;
-};
-
-struct evergreen_arb_registers {
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 mc_arb_rfsh_rate;
- u32 mc_arb_burst_time;
-};
-
-struct at {
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
-};
-
-struct ni_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct RV770_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-
-struct RV770_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-
-
-struct RV730_SMC_MCLK_VALUE
-{
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL2;
- uint32_t vMPLL_FUNC_CNTL3;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-
-struct RV770_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-
-union RV7XX_SMC_MCLK_VALUE
-{
- RV770_SMC_MCLK_VALUE mclk770;
- RV730_SMC_MCLK_VALUE mclk730;
-};
-
-typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- union{
- uint8_t seqValue;
- uint8_t ACIndex;
- };
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t gen2XSP;
- uint8_t backbias;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- RV770_SMC_SCLK_VALUE sclk;
- RV7XX_SMC_MCLK_VALUE mclk;
- RV770_SMC_VOLTAGE_VALUE vddc;
- RV770_SMC_VOLTAGE_VALUE mvdd;
- RV770_SMC_VOLTAGE_VALUE vddci;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t stateFlags;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-
-struct RV770_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t padding1;
- uint8_t padding2;
- uint8_t padding3;
- RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
-
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
-
-struct RV770_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[MAX_NO_VREG_STEPS];
- RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- RV770_SMC_SWSTATE initialState;
- RV770_SMC_SWSTATE ACPIState;
- RV770_SMC_SWSTATE driverState;
- RV770_SMC_SWSTATE ULVState;
-};
-
-typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
-
-struct vddc_table_entry {
- u16 vddc;
- u8 vddc_index;
- u8 high_smio;
- u32 low_smio;
-};
-
-struct rv770_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct rv730_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl2;
- u32 mpll_func_cntl3;
- u32 mpll_ss;
- u32 mpll_ss2;
-};
-
-union r7xx_clock_registers {
- struct rv770_clock_registers rv770;
- struct rv730_clock_registers rv730;
-};
-
struct rv7xx_power_info {
/* flags */
- bool mem_gddr5;
- bool pcie_gen2;
- bool dynamic_pcie_gen2;
- bool acpi_pcie_gen2;
- bool boot_in_gen2;
bool voltage_control; /* vddc */
bool mvdd_control;
bool sclk_ss;
bool mclk_ss;
bool dynamic_ss;
- bool gfx_clock_gating;
- bool mg_clock_gating;
- bool mgcgtssm;
- bool power_gating;
bool thermal_protection;
- bool display_gap;
- bool dcodt;
- bool ulps;
- /* registers */
- union r7xx_clock_registers clk_regs;
- u32 s0_vid_lower_smio_cntl;
/* voltage */
- u32 vddc_mask_low;
- u32 mvdd_mask_low;
u32 mvdd_split_frequency;
- u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
u16 max_vddc;
u16 max_vddc_in_table;
u16 min_vddc_in_table;
- struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
- u8 valid_vddc_entries;
- /* dc odt */
- u32 mclk_odt_threshold;
- u8 odt_value_0[2];
- u8 odt_value_1[2];
/* stored values */
- u32 boot_sclk;
u16 acpi_vddc;
u32 ref_div;
u32 active_auto_throttle_sources;
@@ -582,17 +239,6 @@ struct rv7xx_power_info {
u32 asi;
u32 pasi;
u32 vrc;
- u32 restricted_levels;
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
- /* smc offsets */
- u16 state_table_start;
- u16 soft_regs_start;
- u16 sram_end;
- /* scratch structs */
- RV770_SMC_STATETABLE smc_statetable;
};
enum si_pcie_gen {
@@ -611,44 +257,12 @@ struct rv7xx_pl {
enum si_pcie_gen pcie_gen; /* si+ only */
};
-struct rv7xx_ps {
- struct rv7xx_pl high;
- struct rv7xx_pl medium;
- struct rv7xx_pl low;
- bool dc_compatible;
-};
-
struct si_ps {
u16 performance_level_count;
bool dc_compatible;
struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
-struct ni_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ni_cac_data
-{
- struct ni_leakage_coeffients leakage_coefficients;
- u32 i_leakage;
- s32 leakage_minimum_temperature;
- u32 pwr_const;
- u32 dc_cac_value;
- u32 bif_cac_value;
- u32 lkge_pwr;
- u8 mc_wr_weight;
- u8 mc_rd_weight;
- u8 allow_ovrflw;
- u8 num_win_tdp;
- u8 l2num_win_tdp;
- u8 lts_truncate_n;
-};
-
struct evergreen_power_info {
/* must be first! */
struct rv7xx_power_info rv7xx;
@@ -657,203 +271,33 @@ struct evergreen_power_info {
bool dynamic_ac_timing;
bool abm;
bool mcls;
- bool light_sleep;
- bool memory_transition;
bool pcie_performance_request;
- bool pcie_performance_request_registered;
bool sclk_deep_sleep;
- bool dll_default_on;
- bool ls_clock_gating;
bool smu_uvd_hs;
bool uvd_enabled;
/* stored values */
u16 acpi_vddci;
- u8 mvdd_high_index;
- u8 mvdd_low_index;
u32 mclk_edc_wr_enable_threshold;
- struct evergreen_mc_reg_table mc_reg_table;
struct atom_voltage_table vddc_voltage_table;
struct atom_voltage_table vddci_voltage_table;
- struct evergreen_arb_registers bootup_arb_registers;
- struct evergreen_ulv_param ulv;
- struct at ats[2];
- /* smc offsets */
- u16 mc_reg_table_start;
struct amdgpu_ps current_rps;
- struct rv7xx_ps current_ps;
struct amdgpu_ps requested_rps;
- struct rv7xx_ps requested_ps;
-};
-
-struct PP_NIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSST;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint8_t Reserved[6];
-};
-
-typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-
-struct PP_NIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
-};
-typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-
-struct NISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-
-struct NISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- NISLANDS_SMC_SCLK_VALUE sclk;
- NISLANDS_SMC_MCLK_VALUE mclk;
- NISLANDS_SMC_VOLTAGE_VALUE vddc;
- NISLANDS_SMC_VOLTAGE_VALUE mvdd;
- NISLANDS_SMC_VOLTAGE_VALUE vddci;
- NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint32_t powergate_en;
- uint8_t hUp;
- uint8_t hDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t reserved[2];
- PP_NIslands_Dpm2PerfLevel dpm2;
-};
-
-typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct NISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[];
-};
-
-typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
-
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define NISLANDS_MAX_NO_VREG_STEPS 32
-
-struct NISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
};
-typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
-
struct ni_power_info {
/* must be first! */
struct evergreen_power_info eg;
- struct ni_clock_registers clock_registers;
- struct ni_mc_reg_table mc_reg_table;
u32 mclk_rtt_mode_threshold;
/* flags */
- bool use_power_boost_limit;
bool support_cac_long_term_average;
bool cac_enabled;
bool cac_configuration_required;
bool driver_calculate_cac_leakage;
- bool pc_enabled;
bool enable_power_containment;
bool enable_cac;
bool enable_sq_ramping;
- /* smc offsets */
- u16 arb_table_start;
- u16 fan_table_start;
- u16 cac_table_start;
- u16 spll_table_start;
- /* CAC stuff */
- struct ni_cac_data cac_data;
- u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
- const struct ni_cac_weights *cac_weights;
- u8 lta_window_size;
- u8 lts_truncate;
struct si_ps current_ps;
struct si_ps requested_ps;
- /* scratch structs */
- SMC_NIslands_MCRegisters smc_mc_reg_table;
- NISLANDS_SMC_STATETABLE smc_statetable;
};
struct si_cac_config_reg
@@ -952,7 +396,6 @@ struct si_leakage_voltage
struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
};
-
struct si_ulv_param {
bool supported;
u32 cg_ulv_control;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 554492dfa3c0..3aaf3dd71868 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
@@ -28,12 +27,10 @@
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
-#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
-#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
@@ -634,9 +631,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
+ if (!hwmgr || !hwmgr->pm_en || !table)
return -EINVAL;
+ if (!hwmgr->soft_pp_table)
+ return -EOPNOTSUPP;
+
*table = (char *)hwmgr->soft_pp_table;
return hwmgr->soft_pp_table_size;
}
@@ -955,7 +955,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-static int pp_set_power_limit(void *handle, uint32_t limit)
+static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t max_power_limit;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 8da882c51856..9b28c0728269 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else if (hwmgr->pp_table_version == PP_TABLE_V0)
- thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
thermal_data->sw_ctf_threshold = thermal_data->max;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index d2dbd90bb427..0a876c840c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 1f50f1e74c48..aa3ae9b115c4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index ac9ec8257f82..38e19e5cad4d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -139,7 +139,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id,
NULL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -164,7 +164,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
index f9c0f117725d..0bf1bf5528c2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
@@ -60,7 +60,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id,
NULL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -90,7 +90,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
index d3ff6a831ed5..e2ba593faa5d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
@@ -68,7 +68,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -98,7 +98,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
index a5c95b180672..e3515156d26f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
@@ -192,7 +192,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -223,7 +223,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
@@ -256,7 +256,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
@@ -306,7 +306,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index fb8086859857..f51fa265230b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu,
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
-static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
@@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
/* Enable restore flag */
smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
- /* set the user dpm power limit */
- if (smu->user_dpm_profile.power_limit) {
- ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+ /* set the user dpm power limits */
+ for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
+ if (!smu->user_dpm_profile.power_limits[i])
+ continue;
+ ret = smu_set_power_limit(smu, i,
+ smu->user_dpm_profile.power_limits[i]);
if (ret)
- dev_err(smu->adev->dev, "Failed to set power limit value\n");
+			dev_err(smu->adev->dev, "Failed to set power limit for limit type %d\n", i);
}
/* set the user dpm clock configurations */
@@ -609,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
return true;
}
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
+ ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);
+
+ return ret;
+}
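A hedged sketch of how RAS code might use this entry point; the message name is taken from the dummy-map addition later in this patch, and the calling convention is assumed:

```c
/* Hypothetical caller; assumes SMU_MSG_GetRASTableVersion takes no
 * meaningful parameter and returns the version through read_arg.
 */
static void demo_query_ras_table_version(struct amdgpu_device *adev)
{
	uint32_t version = 0;

	if (!amdgpu_smu_ras_send_msg(adev, SMU_MSG_GetRASTableVersion,
				     0, &version))
		dev_info(adev->dev, "RAS table version %u\n", version);
}
```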
static int smu_sys_get_pp_table(void *handle,
char **table)
@@ -620,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle,
return -EOPNOTSUPP;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
@@ -1655,9 +1669,12 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
ret = smu_system_features_control(smu, true);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed system features control!\n");
- return ret;
+ return ret;
+ }
+
+ return smu_enable_thermal_alert(smu);
}
break;
default:
@@ -2040,6 +2057,12 @@ static int smu_disable_dpms(struct smu_context *smu)
smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
return 0;
+ /* vangogh s0ix */
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
+ adev->in_s0ix)
+ return 0;
+
/*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
@@ -2225,7 +2248,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2257,18 +2279,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
- if (smu->current_power_limit) {
- ret = smu_set_power_limit(smu, smu->current_power_limit);
- if (ret && ret != -EOPNOTSUPP)
- return ret;
- }
-
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
- ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
- if (ret)
- return ret;
- }
-
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
@@ -2796,6 +2806,17 @@ const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
.funcs = &smu_ip_funcs,
};
+const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle)
+{
+ struct smu_context *smu = (struct smu_context *)handle;
+ const struct ras_smu_drv *tmp = NULL;
+ int ret;
+
+ ret = smu_get_ras_smu_drv(smu, &tmp);
+
+ return ret ? NULL : tmp;
+}
+
static int smu_load_microcode(void *handle)
{
struct smu_context *smu = handle;
@@ -2889,6 +2910,9 @@ int smu_get_power_limit(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
+ if (!limit)
+ return -EINVAL;
+
switch (pp_power_type) {
case PP_PWR_TYPE_SUSTAINED:
limit_type = SMU_DEFAULT_PPT_LIMIT;
@@ -2920,6 +2944,8 @@ int smu_get_power_limit(void *handle,
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
+ else
+ return -EOPNOTSUPP;
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
@@ -2958,37 +2984,34 @@ int smu_get_power_limit(void *handle,
return ret;
}
-static int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct smu_context *smu = handle;
- uint32_t limit_type = limit >> 24;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- limit &= (1<<24)-1;
- if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit)
- return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
-
- if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
- dev_err(smu->adev->dev,
- "New power limit (%d) is out of range [%d,%d]\n",
- limit, smu->min_power_limit, smu->max_power_limit);
- return -EINVAL;
+ if (limit_type == SMU_DEFAULT_PPT_LIMIT) {
+ if (!limit)
+ limit = smu->current_power_limit;
+ if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, smu->min_power_limit, smu->max_power_limit);
+ return -EINVAL;
+ }
}
- if (!limit)
- limit = smu->current_power_limit;
-
if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.power_limit = limit;
+ if (ret)
+ return ret;
+ if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ smu->user_dpm_profile.power_limits[limit_type] = limit;
}
- return ret;
+ return 0;
}
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 582c186d8b62..8815fc70b63b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -212,6 +212,7 @@ enum smu_power_src_type {
enum smu_ppt_limit_type {
SMU_DEFAULT_PPT_LIMIT = 0,
SMU_FAST_PPT_LIMIT,
+ SMU_LIMIT_TYPE_COUNT,
};
enum smu_ppt_limit_level {
@@ -231,7 +232,7 @@ enum smu_memory_pool_size {
struct smu_user_dpm_profile {
uint32_t fan_mode;
- uint32_t power_limit;
+ uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];
uint32_t fan_speed_pwm;
uint32_t fan_speed_rpm;
uint32_t flags;
@@ -1521,6 +1522,21 @@ struct pptable_funcs {
*/
ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
void *table);
+	/**
+	 * @ras_send_msg: Send a message with a parameter to the SMU on
+	 * behalf of RAS.
+	 * @msg: Type of message.
+	 * @param: Message parameter.
+	 * @read_arg: SMU response (optional).
+	 */
+	int (*ras_send_msg)(struct smu_context *smu,
+			enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+	/**
+	 * @get_ras_smu_drv: Get the RAS SMU driver interface.
+	 * Return: 0 on success, with *@ras_smu_drv pointing at the interface;
+	 * negative errno otherwise.
+	 */
+	int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv);
};
typedef enum {
@@ -1785,7 +1801,10 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
enum pp_pm_policy p_type, char *sysbuf);
+const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *readarg);
#endif
void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index bf6aa9620911..dd30d96e1ca2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -87,7 +87,7 @@ typedef enum {
/*37*/ FEATURE_DVO = 37,
/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
/*39*/ FEATURE_GLOBAL_DPM = 39,
-/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
+/*40*/ FEATURE_HROM_EN = 40,
/*41*/ NUM_FEATURES = 41
} FEATURE_LIST_e;
@@ -189,7 +189,7 @@ typedef enum {
SVI_MAX_TEMP_ENTRIES, // 13
} SVI_TEMP_e;
-#define SMU_METRICS_TABLE_VERSION 0x14
+#define SMU_METRICS_TABLE_VERSION 0x15
#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
@@ -367,6 +367,11 @@ typedef struct {
//Node Power Limit
uint32_t MaxNodePowerLimit;
+
+ // PPT1 Configuration
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
index 4b066c42e0ec..d09b6ae9827e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -105,23 +105,21 @@
#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C
#define PPSMC_MSG_ResetSDMA 0x4D
#define PPSMC_MSG_GetRasTableVersion 0x4E
-#define PPSMC_MSG_GetRmaStatus 0x4F
-#define PPSMC_MSG_GetErrorCount 0x50
-#define PPSMC_MSG_GetBadPageCount 0x51
-#define PPSMC_MSG_GetBadPageInfo 0x52
-#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53
-#define PPSMC_MSG_SetTimestampLoHi 0x54
-#define PPSMC_MSG_GetTimestampLoHi 0x55
-#define PPSMC_MSG_GetRasPolicy 0x56
-#define PPSMC_MSG_DumpErrorRecord 0x57
+#define PPSMC_MSG_GetBadPageCount 0x50
+#define PPSMC_MSG_GetBadPageMcaAddress 0x51
+#define PPSMC_MSG_SetTimestamp 0x53
+#define PPSMC_MSG_SetTimestampHi 0x54
+#define PPSMC_MSG_GetTimestamp 0x55
+#define PPSMC_MSG_GetBadPageIpIdLoHi 0x57
#define PPSMC_MSG_EraseRasTable 0x58
#define PPSMC_MSG_GetStaticMetricsTable 0x59
#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A
-#define PPSMC_MSG_GetBadPageSeverity 0x5B
#define PPSMC_MSG_GetSystemMetricsTable 0x5C
#define PPSMC_MSG_GetSystemMetricsVersion 0x5D
#define PPSMC_MSG_ResetVCN 0x5E
-#define PPSMC_Message_Count 0x5F
+#define PPSMC_MSG_SetFastPptLimit 0x5F
+#define PPSMC_MSG_GetFastPptLimit 0x60
+#define PPSMC_Message_Count 0x61
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 2256c77da636..9b71a8afdd35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -279,7 +279,16 @@
__SMU_DUMMY_MAP(ResetSDMA), \
__SMU_DUMMY_MAP(ResetVCN), \
__SMU_DUMMY_MAP(GetStaticMetricsTable), \
- __SMU_DUMMY_MAP(GetSystemMetricsTable),
+ __SMU_DUMMY_MAP(GetSystemMetricsTable), \
+ __SMU_DUMMY_MAP(GetRASTableVersion), \
+ __SMU_DUMMY_MAP(GetBadPageCount), \
+ __SMU_DUMMY_MAP(GetBadPageMcaAddr), \
+ __SMU_DUMMY_MAP(SetTimestamp), \
+ __SMU_DUMMY_MAP(GetTimestamp), \
+ __SMU_DUMMY_MAP(GetBadPageIpid), \
+ __SMU_DUMMY_MAP(EraseRasTable), \
+ __SMU_DUMMY_MAP(SetFastPptLimit), \
+ __SMU_DUMMY_MAP(GetFastPptLimit),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -458,7 +467,8 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(GFX_EDC_XVMIN), \
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
__SMU_DUMMY_MAP(FAN_ABNORMAL), \
- __SMU_DUMMY_MAP(PIT),
+ __SMU_DUMMY_MAP(PIT), \
+ __SMU_DUMMY_MAP(HROM_EN),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 9548bd3c624b..55401e6b2b0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
- int ret = 0, size = 0;
+ int ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0;
int i;
smu_cmn_get_sysfs_buf(&buf, &size);
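+ /* smu_cmn_get_sysfs_buf() may leave 'size' at a non-zero offset;
+ * record it so only the bytes emitted here are returned.
+ */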
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret;
}
- return size;
+ return size - start_offset;
}
static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 0028f10ead42..7c9f77124ab2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
- int i, levels, size = 0, ret = 0;
+ int i, levels, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_DCEFCLK:
ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
- return size;
+ return size - start_offset;
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
if (ret < 0)
@@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
- return size;
+ return size - start_offset;
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
@@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
} else {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
- return size;
+ return size - start_offset;
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
@@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int navi10_force_clk_levels(struct smu_context *smu,
@@ -2888,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
dummy_table += 0x1000;
}
- amdgpu_asic_flush_hdp(smu->adev, NULL);
+ amdgpu_hdp_flush(smu->adev, NULL);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 31c2c0386b1f..774283ac7827 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
- int i, size = 0, ret = 0;
+ int i, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 78e4186d06cc..b0d6487171d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
- return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
+ int ret = 0;
+
+ if (smu->smu_table.thermal_controller_type)
+ ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
+
+ return ret;
}
static uint16_t convert_to_vddc(uint8_t vid)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2c9869feba61..9626da2dba58 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_print_clk_levels(struct smu_context *smu,
@@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_common_print_clk_levels(struct smu_context *smu,
@@ -2217,6 +2219,9 @@ static int vangogh_post_smu_init(struct smu_context *smu)
uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+ if (adev->in_s0ix)
+ return 0;
+
/* allow message will be sent after enable message on Vangogh*/
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
@@ -2308,8 +2313,7 @@ static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
uint32_t ppt_limit;
int ret = 0;
@@ -2345,12 +2349,11 @@ static int vangogh_get_power_limit(struct smu_context *smu,
}
static int vangogh_get_ppt_limit(struct smu_context *smu,
- uint32_t *ppt_limit,
- enum smu_ppt_limit_type type,
- enum smu_ppt_limit_level level)
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
if (!power_context)
return -EOPNOTSUPP;
@@ -2399,7 +2402,6 @@ static int vangogh_set_power_limit(struct smu_context *smu,
smu->current_power_limit = ppt_limit;
break;
case SMU_FAST_PPT_LIMIT:
- ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
if (ppt_limit > power_context->max_fast_ppt_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 3baf20f4c373..eaa9ea162f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
bool cur_value_match_level = false;
@@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_RANGE:
@@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
}
- return size;
+ return size - start_offset;
case SMU_SOCCLK:
count = NUM_SOCCLK_DPM_LEVELS;
cur_value = metrics.ClockFrequency[CLOCK_SOCCLK];
@@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index c1062e5f0393..677781060246 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index cb3fea9e8cf3..9e635f733fbf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -34,6 +34,7 @@
#include "amdgpu_fru_eeprom.h"
#include <linux/pci.h>
#include "smu_cmn.h"
+#include "amdgpu_ras.h"
#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS
@@ -58,7 +59,7 @@
#define NUM_JPEG_RINGS_FW 10
#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \
- (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4)
+ (ARRAY_SIZE(gpu_metrics->jpeg_busy) / 4)
const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
@@ -81,6 +82,7 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] =
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_HROM_EN_BIT, FEATURE_HROM_EN),
};
const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
@@ -139,6 +141,15 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
+ MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0),
+ MSG_MAP(GetBadPageCount, PPSMC_MSG_GetBadPageCount, 0),
+ MSG_MAP(GetBadPageMcaAddr, PPSMC_MSG_GetBadPageMcaAddress, 0),
+ MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0),
+ MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0),
+ MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0),
+ MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0),
+ MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 1),
+ MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 1),
};
int smu_v13_0_12_tables_init(struct smu_context *smu)
@@ -345,6 +356,12 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
pptable->MaxNodePowerLimit =
SMUQ10_ROUND(static_metrics->MaxNodePowerLimit);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
+ static_metrics->PPT1Max) {
+ pptable->PPT1Max = static_metrics->PPT1Max;
+ pptable->PPT1Min = static_metrics->PPT1Min;
+ pptable->PPT1Default = static_metrics->PPT1Default;
+ }
smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -449,7 +466,7 @@ static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu)
return ret;
}
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ amdgpu_hdp_invalidate(smu->adev, NULL);
smu_table_cache_update_time(sys_table, jiffies);
memcpy(sys_table->cache.buffer, table->cpu_addr,
smu_v13_0_12_get_system_metrics_size());
@@ -719,15 +736,14 @@ static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
{
const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
- struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct smu_v13_0_6_partition_metrics *xcp_metrics;
struct amdgpu_device *adev = smu->adev;
MetricsTable_t *metrics;
int inst, j, k, idx;
u32 inst_mask;
metrics = (MetricsTable_t *)smu_metrics;
- xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table;
- smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
idx = 0;
for_each_inst(k, inst_mask) {
@@ -772,22 +788,17 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp
return sizeof(*xcp_metrics);
}
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics)
+void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
+ void *smu_metrics,
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics)
{
- struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_8 *gpu_metrics =
- (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
- int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j;
u8 num_jpeg_rings_gpu_metrics;
MetricsTable_t *metrics;
- struct amdgpu_xcp *xcp;
- u32 inst_mask;
metrics = (MetricsTable_t *)smu_metrics;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
-
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
/* Individual HBM stack temperature is not reported */
@@ -877,60 +888,186 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
gpu_metrics->xgmi_link_status[j] = ret;
}
- gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
-
num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics);
- for_each_xcp(adev->xcp_mgr, xcp, i) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- /* Both JPEG and VCN has same instances */
- inst = GET_INST(VCN, k);
-
- for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
- gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * num_jpeg_rings_gpu_metrics) + j] =
- SMUQ10_ROUND(metrics->JpegBusy
- [(inst * NUM_JPEG_RINGS_FW) + j]);
- }
- gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(metrics->VcnBusy[inst]);
- idx++;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+
+ for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
+ gpu_metrics->jpeg_busy[(i * num_jpeg_rings_gpu_metrics) +
+ j] =
+ SMUQ10_ROUND(
+ metrics->JpegBusy[(inst *
+ NUM_JPEG_RINGS_FW) +
+ j]);
}
+ gpu_metrics->vcn_busy[i] = SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ }
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- inst = GET_INST(GC, k);
- gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(metrics->GfxBusy[inst]);
- gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
- }
- idx++;
- }
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] =
+ SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->gfx_busy_acc[i] =
+ SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics
+ ->gfx_below_host_limit_ppt_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics
+ ->gfx_below_host_limit_thm_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->gfx_low_utilization_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc
+ [i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
}
gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
gpu_metrics->firmware_timestamp = metrics->Timestamp;
-
- *table = (void *)gpu_metrics;
-
- return sizeof(*gpu_metrics);
}
const struct smu_temp_funcs smu_v13_0_12_temp_funcs = {
.temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported,
.get_temp_metrics = smu_v13_0_12_get_temp_metrics,
};
+
+static int smu_v13_0_12_get_ras_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetRASTableVersion, 0, table_version);
+}
+
+static int smu_v13_0_12_get_badpage_count(struct amdgpu_device *adev, uint32_t *count,
+ uint32_t timeout)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint64_t end, now;
+ int ret = 0;
+
+ now = (uint64_t)ktime_to_ms(ktime_get());
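+ /* Poll the SMU every 10 ms until it returns a count or the
+ * caller-supplied timeout (in ms) expires.
+ */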
+ end = now + timeout;
+ do {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageCount, 0, count);
+ /* -EBUSY means the eeprom is not ready yet; keep polling */
+ if (ret != -EBUSY)
+ return ret;
+ mdelay(10);
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ } while (now < end);
+
+ dev_err(adev->dev, "SMU get bad page count timed out!\n");
+ return ret;
+}
+
+static int smu_v13_0_12_set_timestamp(struct amdgpu_device *adev, uint64_t timestamp)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
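+ /* Only the low 32 bits of the timestamp are programmed here;
+ * PPSMC_MSG_SetTimestampHi exists for the upper half but is not
+ * mapped yet.
+ */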
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetTimestamp, (uint32_t)timestamp, NULL);
+}
+
+static int smu_v13_0_12_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetTimestamp, index, &temp);
+ if (!ret)
+ *timestamp = temp;
+
+ return ret;
+}
+
+static int smu_v13_0_12_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp_arg, temp_ipid_lo, temp_ipid_high;
+ int ret;
+
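+ /* Bits 31:16 of the argument select which half of the 64-bit
+ * IPID to read (1 = low, 2 = high); bits 15:0 carry the bad
+ * page index.
+ */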
+ temp_arg = index | (1 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_lo);
+ if (ret)
+ return ret;
+
+ temp_arg = index | (2 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_high);
+ if (!ret)
+ *ipid = (uint64_t)temp_ipid_high << 32 | temp_ipid_lo;
+ return ret;
+}
+
+static int smu_v13_0_12_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EraseRasTable, 0, result);
+}
+
+static int smu_v13_0_12_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp_arg, temp_addr_lo, temp_addr_high;
+ int ret;
+
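+ /* Same selector encoding as the IPID query: 1 = low 32 bits,
+ * 2 = high 32 bits of the MCA address, index in bits 15:0.
+ */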
+ temp_arg = index | (1 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_lo);
+ if (ret)
+ return ret;
+
+ temp_arg = index | (2 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_high);
+ if (!ret)
+ *mca_addr = (uint64_t)temp_addr_high << 32 | temp_addr_lo;
+ return ret;
+}
+
+static const struct ras_eeprom_smu_funcs smu_v13_0_12_eeprom_smu_funcs = {
+ .get_ras_table_version = smu_v13_0_12_get_ras_table_version,
+ .get_badpage_count = smu_v13_0_12_get_badpage_count,
+ .get_badpage_mca_addr = smu_v13_0_12_get_badpage_mca_addr,
+ .set_timestamp = smu_v13_0_12_set_timestamp,
+ .get_timestamp = smu_v13_0_12_get_timestamp,
+ .get_badpage_ipid = smu_v13_0_12_get_badpage_ipid,
+ .erase_ras_table = smu_v13_0_12_erase_ras_table,
+};
+
+static void smu_v13_0_12_ras_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!flags)
+ return;
+
+ *flags = 0ULL;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(RAS_EEPROM)))
+ *flags |= RAS_SMU_FEATURE_BIT__RAS_EEPROM;
+}
+
+const struct ras_smu_drv smu_v13_0_12_ras_smu_drv = {
+ .smu_eeprom_funcs = &smu_v13_0_12_eeprom_smu_funcs,
+ .ras_smu_feature_flags = smu_v13_0_12_ras_smu_feature_flags,
+};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index b081ae3e8f43..6908f9930f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_4_read_sensor(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index f5db181ef489..4576bf008b22 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -861,11 +861,12 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 285cf7979693..44e1cd821eec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -356,6 +356,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver > 0x04560900)
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+ if (fw_ver >= 0x04560D00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT));
+
if (fw_ver >= 0x04560700) {
if (fw_ver >= 0x04560900) {
smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
@@ -450,7 +453,8 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
- if ((pgm == 0) && (fw_ver >= 0x00558200))
+ if ((pgm == 0 && fw_ver >= 0x00558200) ||
+ (pgm == 7 && fw_ver >= 0x07551400))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
@@ -548,7 +552,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- void *gpu_metrics_table __free(kfree) = NULL;
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics;
void *driver_pptable __free(kfree) = NULL;
void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
@@ -578,24 +582,28 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
- gpu_metrics_table =
- kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
- if (!gpu_metrics_table)
- return -ENOMEM;
-
driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
if (!driver_pptable)
return -ENOMEM;
+ ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS,
+ sizeof(struct smu_v13_0_6_gpu_metrics), 1);
+ if (ret)
+ return ret;
+
+ gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+ *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
+
+ smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9);
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12)) {
ret = smu_v13_0_12_tables_init(smu);
- if (ret)
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
return ret;
+ }
}
- smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table);
smu_table->metrics_table = no_free_ptr(metrics_table);
smu_table->driver_pptable = no_free_ptr(driver_pptable);
@@ -731,6 +739,7 @@ static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
smu_v13_0_12_tables_fini(smu);
+ smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
return smu_v13_0_fini_smc_tables(smu);
}
@@ -765,7 +774,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
return ret;
}
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
smu_table->metrics_time = jiffies;
@@ -844,12 +853,23 @@ int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
return ret;
}
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
return 0;
}
+static void smu_v13_0_6_update_caps(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
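+ /* A zero PPT1Max from the static metrics means the firmware
+ * does not expose a fast PPT limit; drop the capability.
+ */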
+ !pptable->PPT1Max)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT));
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -866,8 +886,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
uint8_t max_width;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_setup_driver_pptable(smu);
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_setup_driver_pptable(smu);
+ if (ret)
+ return ret;
+ goto out;
+ }
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
@@ -947,7 +971,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
}
}
-
+out:
+ smu_v13_0_6_update_caps(smu);
return 0;
}
@@ -1393,7 +1418,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
return -EINVAL;
if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
- size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
+ size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
clocks.data[i].clocks_in_khz /
@@ -1428,7 +1453,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
- int now, size = 0;
+ int now, size = 0, start_offset = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct smu_13_0_dpm_table *single_dpm_table;
@@ -1437,10 +1462,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
uint32_t min_clk, max_clk;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
dpm_context = smu_dpm->dpm_context;
@@ -1512,9 +1538,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "mclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "mclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_SOCCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
&now);
@@ -1526,9 +1556,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "socclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "socclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_FCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
&now);
@@ -1540,9 +1574,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "fclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "fclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_VCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
&now);
@@ -1554,9 +1592,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "vclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "vclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_DCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
&now);
@@ -1568,14 +1610,18 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "dclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "dclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
default:
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
@@ -1845,7 +1891,7 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
if (current_power_limit)
*current_power_limit = power_limit;
if (default_power_limit)
- *default_power_limit = power_limit;
+ *default_power_limit = pptable->MaxSocketPowerLimit;
if (max_power_limit) {
*max_power_limit = pptable->MaxSocketPowerLimit;
@@ -1860,9 +1906,66 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret;
+
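+ /* Fast PPT (PPT1) limits are range-checked against the values
+ * from the static metrics table and programmed with a dedicated
+ * message; the default socket limit takes the generic path below.
+ */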
+ if (limit_type == SMU_FAST_PPT_LIMIT) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
+ return -EOPNOTSUPP;
+ if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, pptable->PPT1Min, pptable->PPT1Max);
+ return -EINVAL;
+ }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit,
+ limit, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Set fast PPT limit failed!\n");
+ return ret;
+ }
+
return smu_v13_0_set_power_limit(smu, limit_type, limit);
}
+static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret = 0;
+
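+ /* Only the fast (PPT1) limit is reported here; the default
+ * socket limit is handled by smu_v13_0_6_get_power_limit().
+ */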
+ if (type == SMU_FAST_PPT_LIMIT) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
+ return -EOPNOTSUPP;
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = pptable->PPT1Max;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
+ break;
+ case SMU_PPT_LIMIT_DEFAULT:
+ *ppt_limit = pptable->PPT1Default;
+ break;
+ case SMU_PPT_LIMIT_MIN:
+ *ppt_limit = pptable->PPT1Min;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+ return -EOPNOTSUPP;
+}
+
static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -2383,7 +2486,7 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
memcpy(table->cpu_addr, table_data, table_size);
/* Flush hdp cache */
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
NULL);
@@ -2627,7 +2730,7 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
{
const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
int version = smu_v13_0_6_get_metrics_version(smu);
- struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct smu_v13_0_6_partition_metrics *xcp_metrics;
MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int ret, inst, i, j, k, idx;
@@ -2647,8 +2750,8 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
if (i == adev->xcp_mgr->num_xcps)
return -EINVAL;
- xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table;
- smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
+ smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1);
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
if (!metrics_v0)
@@ -2740,18 +2843,16 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_8 *gpu_metrics =
- (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics;
int version = smu_v13_0_6_get_metrics_version(smu);
MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
- int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
- struct amdgpu_xcp *xcp;
u16 link_width_level;
u8 num_jpeg_rings;
- u32 inst_mask;
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
@@ -2759,16 +2860,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
if (ret)
return ret;
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
- IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+ gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+ *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0,
+ gpu_metrics);
+ goto fill;
+ }
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
-
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
/* Individual HBM stack temperature is not reported */
@@ -2889,55 +2994,49 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xgmi_link_status[j] = ret;
}
- gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
-
per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
- for_each_xcp(adev->xcp_mgr, xcp, i) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- /* Both JPEG and VCN has same instances */
- inst = GET_INST(VCN, k);
-
- for (j = 0; j < num_jpeg_rings; ++j) {
- gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version)
- [(inst * num_jpeg_rings) + j]);
- }
- gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
- idx++;
-
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < num_jpeg_rings; ++j)
+ gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_busy[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+ }
- if (per_inst) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- inst = GET_INST(GC, k);
- gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
- gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
- version)[inst]);
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkLowUtilizationAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
- }
- idx++;
+ if (per_inst) {
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->gfx_below_host_limit_ppt_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitPptAcc
+ [inst]);
+ gpu_metrics->gfx_below_host_limit_thm_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitThmAcc
+ [inst]);
+ gpu_metrics->gfx_low_utilization_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0
+ ->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitTotalAcc
+ [inst]);
}
}
}
@@ -2947,7 +3046,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
- *table = (void *)gpu_metrics;
+fill:
+ *table = tables[SMU_TABLE_SMU_METRICS].cache.buffer;
return sizeof(*gpu_metrics);
}
@@ -3226,6 +3326,24 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_ras_send_msg(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg)
+{
+ int ret;
+
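+ /* Restrict RAS to the MCA query/dump messages; any other
+ * message type is rejected.
+ */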
+ switch (msg) {
+ case SMU_MSG_QueryValidMcaCount:
+ case SMU_MSG_QueryValidMcaCeCount:
+ case SMU_MSG_McaBankDumpDW:
+ case SMU_MSG_McaBankCeDumpDW:
+ case SMU_MSG_ClearMcaOnRead:
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg);
+ break;
+ default:
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
static int smu_v13_0_6_post_init(struct smu_context *smu)
{
@@ -3863,6 +3981,29 @@ static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
== IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
}
+static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu,
+ const struct ras_smu_drv **ras_smu_drv)
+{
+ if (!ras_smu_drv)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return -EOPNOTSUPP;
+
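+ /* RAS eeprom access over SMU is only available when the
+ * firmware exposes the HROM feature.
+ */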
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM));
+
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ *ras_smu_drv = &smu_v13_0_12_ras_smu_drv;
+ break;
+ default:
+ *ras_smu_drv = NULL;
+ break;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -3894,6 +4035,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_limit = smu_v13_0_6_set_power_limit,
+ .get_ppt_limit = smu_v13_0_6_get_ppt_limit,
.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
.register_irq_handler = smu_v13_0_6_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -3921,6 +4063,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.reset_sdma = smu_v13_0_6_reset_sdma,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
.post_init = smu_v13_0_6_post_init,
+ .ras_send_msg = smu_v13_0_6_ras_send_msg,
+ .get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 7ef5f3e66c27..6cbdd7c5ded9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -50,6 +50,9 @@ struct PPTable_t {
uint32_t MinLclkDpmRange;
uint64_t PublicSerialNumber_AID;
uint32_t MaxNodePowerLimit;
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
bool Init;
};
@@ -72,9 +75,18 @@ enum smu_v13_0_6_caps {
SMU_CAP(PLDM_VERSION),
SMU_CAP(TEMP_METRICS),
SMU_CAP(NPM_METRICS),
+ SMU_CAP(RAS_EEPROM),
+ SMU_CAP(FAST_PPT),
SMU_CAP(ALL),
};
+#define SMU_13_0_6_NUM_XGMI_LINKS 8
+#define SMU_13_0_6_MAX_GFX_CLKS 8
+#define SMU_13_0_6_MAX_CLKS 4
+#define SMU_13_0_6_MAX_XCC 8
+#define SMU_13_0_6_MAX_VCN 4
+#define SMU_13_0_6_MAX_JPEG 40
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
@@ -87,7 +99,6 @@ size_t smu_v13_0_12_get_system_metrics_size(void);
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member, uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics);
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
struct amdgpu_xcp *xcp, void *table,
void *smu_metrics);
@@ -99,4 +110,156 @@ int smu_v13_0_12_get_npm_data(struct smu_context *smu,
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
+extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv;
+
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/* SMU v13.0.6 GPU metrics */
+#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hotspot); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mem); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_vrsoc); \
+ SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \
+ SMU_MTYPE(U16), curr_socket_power); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_gfx_activity); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_umc_activity); \
+ SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), mem_max_bandwidth); \
+ SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), energy_accumulator); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \
+ SMU_MTYPE(U64), system_clock_counter); \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), prochot_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), ppt_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), socket_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), vr_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), hbm_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), gfxclk_lock_status); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), pcie_link_width); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \
+ SMU_MTYPE(U16), pcie_link_speed); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_width); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \
+ SMU_MTYPE(U16), xgmi_link_speed); \
+ SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), mem_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U64), pcie_bandwidth_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), pcie_bandwidth_inst); \
+ SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_rover_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), pcie_nak_sent_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), pcie_nak_rcvd_count_acc); \
+ SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_read_data_acc, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_write_data_acc, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_status, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \
+ SMU_MTYPE(U64), firmware_timestamp); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \
+ SMU_MUNIT(NONE), SMU_MTYPE(U32), \
+ pcie_lc_perf_other_end_recovery); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_13_0_6_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_13_0_6_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_13_0_6_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
+void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
+ void *smu_metrics,
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics);
+
+#define SMU_13_0_6_PARTITION_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_13_0_6_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_13_0_6_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_13_0_6_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics,
+ SMU_13_0_6_PARTITION_METRICS_FIELDS);
+
+#endif /* SWSMU_CODE_LAYER_L2 */
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c96fa5e49ed6..a3fc35b9011e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 73b4506ef5a8..5d7e671fa3c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
uint32_t clk_limit = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int yellow_carp_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index fe00c84b1cc6..b1bd946d8e30 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, ret = 0, size = 0;
+ int i, idx, ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 086501cc5213..2cea688c604f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
struct smu_14_0_dpm_table *single_dpm_table;
struct smu_14_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f532f7c69259..4040ff926544 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -164,9 +164,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
msg_index, param, message);
break;
case SMU_RESP_BUSY_OTHER:
- dev_err_ratelimited(adev->dev,
- "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
- msg_index, param, message);
+ /* It is normal for SMU_MSG_GetBadPageCount to return busy,
+ * so don't print an error in this case.
+ */
+ if (msg != SMU_MSG_GetBadPageCount)
+ dev_err_ratelimited(adev->dev,
+ "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
+ msg_index, param, message);
break;
case SMU_RESP_DEBUG_END:
dev_err_ratelimited(adev->dev,
@@ -969,7 +973,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
@@ -980,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu,
* Flush hdp cache: to ensure the content seen by
* the GPU is consistent with the CPU.
*/
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
}
ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
@@ -992,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu,
return ret;
if (!drv2smu) {
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table_data, table->cpu_addr, table_size);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 0ae91c8b6d72..8d7c4814c68f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -202,5 +202,72 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
u32 workload_mask,
u32 *backend_workload_mask);
+/* SMU gpu metrics */
+
+/* Attribute ID mapping */
+#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X
+/* Type ID mapping */
+#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X
+/* Unit ID mapping */
+#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X
+
+/* Map TYPEID to C type */
+#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID
+
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64
+
+/* struct members */
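+/* Each field-list entry expands to a u64 self-describing attribute word
+ * (NAME_ftype) immediately followed by the value member itself.
+ */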
+#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \
+ u64 NAME##_ftype; \
+ SMU_CTYPE(TYPEID) NAME
+
+#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
+ u64 NAME##_ftype; \
+ SMU_CTYPE(TYPEID) NAME[SIZE]
+
+/* Init functions for scalar/array fields - init to 0xFFs */
+#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \
+ do { \
+ obj->NAME##_ftype = \
+ AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \
+ obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \
+ count++; \
+ } while (0)
+
+#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
+ do { \
+ obj->NAME##_ftype = \
+ AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \
+ memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \
+ count++; \
+ } while (0)
+
+/* Declare Metrics Class and Template object */
+#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \
+ struct __packed CLASSNAME { \
+ struct metrics_table_header header; \
+ int attr_count; \
+ SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \
+ }; \
+ static inline void CLASSNAME##_init(struct CLASSNAME *obj, \
+ uint8_t frev, uint8_t crev) \
+ { \
+ int count = 0; \
+ memset(obj, 0xFF, sizeof(*obj)); \
+ obj->header.format_revision = frev; \
+ obj->header.content_revision = crev; \
+ obj->header.structure_size = sizeof(*obj); \
+ SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \
+ SMU_METRICS_INIT_ARRAY) \
+ obj->attr_count = count; \
+ }
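+
+/* Illustrative expansion (names below are hypothetical): given
+ *
+ *	#define MY_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ *		SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ *			   SMU_MTYPE(U16), current_uclk);
+ *	DECLARE_SMU_METRICS_CLASS(my_metrics, MY_FIELDS);
+ *
+ * declares a packed struct my_metrics whose body carries a u64
+ * current_uclk_ftype descriptor immediately followed by a u16
+ * current_uclk value, plus a my_metrics_init() helper that encodes the
+ * descriptor with AMDGPU_METRICS_ENC_ATTR() and resets the value to
+ * all 0xFFs.
+ */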
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index c09ecf1a68a0..34f6b4b1c3ba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -100,6 +100,7 @@
#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu)
#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable)
#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range)
+#define smu_get_ras_smu_drv(smu, ras_smu_drv) smu_ppt_funcs(get_ras_smu_drv, -EOPNOTSUPP, smu, ras_smu_drv)
#endif
#endif
diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile
new file mode 100644
index 000000000000..bbdaba811d34
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/Makefile
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+ifeq ($(AMD_GPU_RAS_MGR),)
+ AMD_GPU_RAS_MGR := ras_mgr
+endif
+
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR)
+
+RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore
+
+AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS)))
+
+include $(AMD_RAS)
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
new file mode 100644
index 000000000000..5e5a2cfa4068
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
@@ -0,0 +1,33 @@
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+RAS_MGR_FILES = amdgpu_ras_sys.o \
+ amdgpu_ras_mgr.o \
+ amdgpu_ras_eeprom_i2c.o \
+ amdgpu_ras_mp1_v13_0.o \
+ amdgpu_ras_cmd.o \
+ amdgpu_ras_process.o \
+ amdgpu_ras_nbio_v7_9.o
+
+RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_MGR)
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
new file mode 100644
index 000000000000..78419b7f7729
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_mgr.h"
+
+/* inject address is 52 bits */
+#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+#define AMDGPU_RAS_TYPE_RASCORE 0x1
+#define AMDGPU_RAS_TYPE_AMDGPU 0x2
+#define AMDGPU_RAS_TYPE_VF 0x3
+
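+/* Keep DF C-states and XGMI per-link power-down disallowed while an
+ * XGMI/WAFL error is injected; amdgpu_ras_trigger_error_end() restores
+ * the defaults afterwards.
+ */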
+static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ RAS_DEV_WARN(adev, "Failed to disallow df cstate");
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to disallow XGMI power down");
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ RAS_DEV_WARN(adev, "Failed to allow df cstate");
+ }
+
+ return 0;
+}
+
+static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core,
+ uint64_t addr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+
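+ /* Offset the node-local address by this node's segment base in the hive-wide space */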
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
+
+static int amdgpu_ras_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (req->block_id == RAS_BLOCK_ID__UMC) {
+ if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ req->address);
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ if ((req->address >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.",
+ req->address);
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+ }
+
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ req->block_id != RAS_BLOCK_ID__GFX) {
+ req->address = local_addr_to_xgmi_global_addr(ras_core, req->address);
+ }
+ }
+
+ amdgpu_ras_trigger_error_prepare(ras_core, req);
+ ret = rascore_handle_cmd(ras_core, cmd, data);
+ amdgpu_ras_trigger_error_end(ras_core, req);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_dev_handle *input_data =
+ (struct ras_cmd_dev_handle *)cmd->input_buff_raw;
+ struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges =
+ (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw;
+ struct amdgpu_mem_partition_info *mem_ranges;
+ uint32_t i = 0;
+
+ if (cmd->input_size != sizeof(*input_data))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ mem_ranges = adev->gmc.mem_partitions;
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
+ ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT;
+ ranges->range[i].size = mem_ranges[i].size;
+ ranges->range[i].idx = i;
+ }
+
+ ranges->num_ranges = adev->gmc.num_mem_partitions;
+
+ ranges->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_translate_fb_address(struct ras_core_context *ras_core,
+ enum ras_fb_addr_type src_type,
+ enum ras_fb_addr_type dest_type,
+ union ras_translate_fb_address *src_addr,
+ union ras_translate_fb_address *dest_addr)
+{
+ uint64_t soc_phy_addr;
+ int ret = RAS_CMD__SUCCESS;
+
+ /* Does not need to be queued as an event since this is a SW translation */
+ switch (src_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ soc_phy_addr = src_addr->soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_bank_to_soc_pa(ras_core,
+ src_addr->bank_addr, &soc_phy_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ switch (dest_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ dest_addr->soc_phy_addr = soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_soc_pa_to_bank(ras_core,
+ soc_phy_addr, &dest_addr->bank_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_translate_fb_address_req *req_buff =
+ (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw;
+ struct ras_cmd_translate_fb_address_rsp *rsp_buff =
+ (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) ||
+ (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ret = ras_translate_fb_address(ras_core, req_buff->src_addr_type,
+ req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ rsp_buff->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error},
+ {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges},
+ {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address},
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i, res;
+
+ for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) {
+ if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &amdgpu_ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (ras_cmd)
+ res = ras_cmd->func(ras_core, cmd, NULL);
+ else
+ res = RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return res;
+}
+
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd)
+{
+ struct ras_core_context *cmd_core = ras_core;
+ int timeout = 60;
+ int res;
+
+ cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD;
+ cmd->output_size = 0;
+
+ if (!ras_core_is_enabled(cmd_core))
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+
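+ /* Poll once per second, for up to 60 seconds, while the GPU is in reset */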
+ while (ras_core_gpu_in_reset(cmd_core)) {
+ msleep(1000);
+ if (!timeout--)
+ return RAS_CMD__ERROR_TIMEOUT;
+ }
+
+ res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL);
+ if (res == RAS_CMD__ERROR_UKNOWN_CMD)
+ res = rascore_handle_cmd(cmd_core, cmd, NULL);
+
+ cmd->cmd_res = res;
+
+ if (cmd->output_size > cmd->output_buf_size) {
+ RAS_DEV_ERR(cmd_core->dev,
+ "Output size 0x%x exceeds output buffer size 0x%x!\n",
+ cmd->output_size, cmd->output_buf_size);
+ return RAS_CMD__SUCCESS_EXEED_BUFFER;
+ }
+
+ return RAS_CMD__SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
new file mode 100644
index 000000000000..5973b156cc85
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_CMD_H__
+#define __AMDGPU_RAS_CMD_H__
+#include "ras.h"
+
+enum amdgpu_ras_cmd_id {
+ RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START,
+ RAS_CMD__TRANSLATE_MEMORY_FD,
+ RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END,
+};
+
+struct ras_cmd_translate_memory_fd_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t type;
+ uint32_t fd;
+ uint64_t address;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_translate_memory_fd_rsp {
+ uint32_t version;
+ uint32_t padding;
+ uint64_t start;
+ uint64_t size;
+ uint32_t reserved[2];
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
new file mode 100644
index 000000000000..3ed3ff42b7e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "ras_eeprom.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3. See top of amdgpu_eeprom.c.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
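+/* e.g. MAKE_I2C_ADDR(0x0) == 0x50, MAKE_I2C_ADDR(0x40000) == 0x54 */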
+#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control))
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+static int ras_eeprom_i2c_config(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ u8 i2c_addr;
+
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
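+ *
+ * For example, a wire-format address of 0xA8 yields the 7-bit
+ * address 0x54, i.e. EEPROM memory address 0x40000.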
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+ return 0;
+ }
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return 0;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter;
+ u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .flags = 0,
+ .len = EEPROM_OFFSET_SIZE,
+ .buf = eeprom_offset_buf,
+ },
+ {
+ .flags = read ? I2C_M_RD : 0,
+ },
+ };
+ const u8 *p = eeprom_buf;
+ int r;
+ u16 len;
+
+ for (r = 0; buf_size > 0;
+ buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
+ /* Set the EEPROM address we want to write to/read from.
+ */
+ msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
+ msgs[1].addr = msgs[0].addr;
+ msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
+ msgs[0].buf[1] = eeprom_addr & 0xff;
+
+ if (!read) {
+ /* Write the maximum amount of data, without
+ * crossing the device's page boundary, as per
+ * its spec. Partial page writes are allowed,
+ * starting at any location within the page,
+ * so long as the page boundary isn't crossed
+ * over (actually the page pointer rolls
+ * over).
+ *
+ * As per the AT24CM02 EEPROM spec, after
+ * writing into a page, the I2C driver should
+ * terminate the transfer, i.e. in
+ * "i2c_transfer()" below, with a STOP
+ * condition, so that the self-timed write
+ * cycle begins. This is implied for the
+ * "i2c_transfer()" abstraction.
+ */
+ len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK),
+ buf_size);
+ } else {
+ /* Reading from the EEPROM has no limitation
+ * on the number of bytes read from the EEPROM
+ * device--they are simply sequenced out.
+ * Keep in mind that i2c_msg.len is u16 type.
+ */
+ len = min(U16_MAX, buf_size);
+ }
+ msgs[1].len = len;
+ msgs[1].buf = eeprom_buf;
+
+ /* This constitutes a START-STOP transaction.
+ */
+ r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
+ if (r != ARRAY_SIZE(msgs))
+ break;
+
+ if (!read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+ }
+
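+ /* Return a negative errno, or the number of bytes transferred */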
+ return r < 0 ? r : eeprom_buf - p;
+}
+
+const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = {
+ .eeprom_i2c_xfer = ras_eeprom_i2c_xfer,
+ .update_eeprom_i2c_config = ras_eeprom_i2c_config,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
new file mode 100644
index 000000000000..3b5878605411
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_EEPROM_I2C_H__
+#define __AMDGPU_RAS_EEPROM_I2C_H__
+#include "ras.h"
+
+extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
new file mode 100644
index 000000000000..afe8135b6258
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_process.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+
+/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
+#define TYPICAL_ECC_BAD_PAGE_RATE (100ULL * SZ_1M)
+
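+/* 16 bad-page records per reserved 2MB DRAM row: (size / 2MB) * 16 */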
+#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4)
+
+/* Reserve 8 physical DRAM rows for possible retirement.
+ * In the worst case, this loses 8 * 2MB of memory in the vram domain.
+ */
+#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
+
+static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr)
+{
+ struct ras_event_state *event_state;
+ int i;
+
+ memset(mgr, 0, sizeof(*mgr));
+ atomic64_set(&mgr->seqno, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
+ event_state = &mgr->event_state[i];
+ event_state->last_seqno = RAS_EVENT_INVALID_ID;
+ atomic64_set(&event_state->count, 0);
+ }
+}
+
+static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+
+ /* init event manager; on an xgmi system only node 0 does this */
+ if (!amdgpu_reset_in_recovery(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_mgr_init_event_mgr(event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
+static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_aca_config *aca_cfg = &config->aca_cfg;
+
+ aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE;
+ aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET;
+ aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg;
+
+ eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func;
+ eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus;
+ if (eeprom_cfg->eeprom_i2c_adapter) {
+ const struct i2c_adapter_quirks *quirks =
+ ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks;
+
+ if (quirks) {
+ eeprom_cfg->max_i2c_read_len = quirks->max_read_len;
+ eeprom_cfg->max_i2c_write_len = quirks->max_write_len;
+ }
+ }
+
+ /*
+ * amdgpu_bad_page_threshold is used to config
+ * the threshold for the number of bad pages.
+ * -1: Threshold is set to default value
+ * Driver will issue a warning message when threshold is reached
+ * and continue runtime services.
+ * 0: Disable bad page retirement
+ * Driver will not retire bad pages
+ * which is intended for debugging purpose.
+ * -2: Threshold is determined by a formula
+ * that assumes 1 bad page per 100M of local memory.
+	 * Driver will continue runtime services when threshold is reached.
+	 * 0 < threshold < max number of bad page records in EEPROM:
+	 * A user-defined threshold is set.
+ * Driver will halt runtime services when this custom threshold is reached.
+ */
+ if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ div64_u64(adev->gmc.mc_vram_size, TYPICAL_ECC_BAD_PAGE_RATE);
+ else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT);
+ else
+ eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold;
+
+ eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_mp1_config *mp1_cfg = &config->mp1_cfg;
+ int ret = 0;
+
+ switch (config->mp1_ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+ "The mp1(0x%x) ras config is not supported!\n",
+ config->mp1_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_nbio_config *nbio_cfg = &config->nbio_cfg;
+ int ret = 0;
+
+ switch (config->nbio_ip_version) {
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+ "The nbio(0x%x) ras config is not supported!\n",
+ config->nbio_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ta_context *context = &adev->psp.ras_context.context;
+
+ status->initialized = context->initialized;
+ status->session_id = context->session_id;
+ status->psp_cmd_mutex = &adev->psp.mutex;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t nps_mode;
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_ta_param->poison_mode_en = 1;
+
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ ras_ta_param->dgpu_mode = 1;
+
+ ras_ta_param->xcc_mask = adev->gfx.xcc_mask;
+ ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+
+ ras_ta_param->active_umc_mask = adev->umc.active_mask;
+
+ if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode))
+ ras_ta_param->nps_mode = nps_mode;
+
+ return 0;
+}
+
+const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = {
+ .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status,
+ .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param,
+};
+
+static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_psp_config *psp_cfg = &config->psp_cfg;
+
+ psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_umc_config *umc_cfg = &config->umc_cfg;
+
+ umc_cfg->umc_vram_type = adev->gmc.vram_type;
+
+ return 0;
+}
+
+static struct ras_core_context *amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev)
+{
+ struct ras_core_config init_config;
+
+ memset(&init_config, 0, sizeof(init_config));
+
+ init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0);
+ init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
+ init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0);
+ init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+
+ if (init_config.umc_ip_version == IP_VERSION(12, 0, 0) ||
+ init_config.umc_ip_version == IP_VERSION(12, 5, 0))
+ init_config.aca_ip_version = IP_VERSION(1, 0, 0);
+
+ init_config.sys_fn = &amdgpu_ras_sys_fn;
+ init_config.ras_eeprom_supported = true;
+ init_config.poison_supported =
+ amdgpu_ras_is_poison_mode_supported(adev);
+
+ amdgpu_ras_mgr_init_aca_config(adev, &init_config);
+ amdgpu_ras_mgr_init_eeprom_config(adev, &init_config);
+ amdgpu_ras_mgr_init_mp1_config(adev, &init_config);
+ amdgpu_ras_mgr_init_nbio_config(adev, &init_config);
+ amdgpu_ras_mgr_init_psp_config(adev, &init_config);
+ amdgpu_ras_mgr_init_umc_config(adev, &init_config);
+
+ return ras_core_create(&init_config);
+}
+
+static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr;
+ int ret = 0;
+
+ /* Disabled by default */
+ con->uniras_enabled = false;
+
+ /* Enabled only in debug mode */
+ if (adev->debug_enable_ras_aca) {
+ con->uniras_enabled = true;
+ RAS_DEV_INFO(adev, "Debug amdgpu uniras!");
+ }
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL);
+ if (!ras_mgr)
+ return -ENOMEM;
+
+ con->ras_mgr = ras_mgr;
+ ras_mgr->adev = adev;
+
+ ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev);
+ if (!ras_mgr->ras_core) {
+ RAS_DEV_ERR(adev, "Failed to create ras core!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ras_mgr->ras_core->dev = adev;
+
+ amdgpu_ras_process_init(adev);
+ ras_core_sw_init(ras_mgr->ras_core);
+ amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core);
+ return 0;
+
+err:
+ kfree(ras_mgr);
+ return ret;
+}
+
+static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr;
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr)
+ return 0;
+
+ amdgpu_ras_process_fini(adev);
+ ras_core_sw_fini(ras_mgr->ras_core);
+ ras_core_destroy(ras_mgr->ras_core);
+ ras_mgr->ras_core = NULL;
+
+ kfree(con->ras_mgr);
+ con->ras_mgr = NULL;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ret = ras_core_hw_init(ras_mgr->ras_core);
+ if (ret) {
+ RAS_DEV_ERR(adev, "Failed to initialize ras core!\n");
+ return ret;
+ }
+
+ ras_mgr->ras_is_ready = true;
+
+ amdgpu_enable_uniras(adev, true);
+
+ RAS_DEV_INFO(adev, "AMDGPU RAS is ready.\n");
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ras_core_hw_fini(ras_mgr->ras_core);
+
+ ras_mgr->ras_is_ready = false;
+
+ return 0;
+}
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev)
+{
+ if (!adev || !adev->psp.ras_context.ras)
+ return NULL;
+
+ return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr;
+}
+
+static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = {
+ .name = "ras_v1_0",
+ .sw_init = amdgpu_ras_mgr_sw_init,
+ .sw_fini = amdgpu_ras_mgr_sw_fini,
+ .hw_init = amdgpu_ras_mgr_hw_init,
+ .hw_fini = amdgpu_ras_mgr_hw_fini,
+};
+
+const struct amdgpu_ip_block_version ras_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_RAS,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ras_v1_0_ip_funcs,
+};
+
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EPERM;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EPERM;
+
+ RAS_DEV_INFO(adev, "%s amdgpu unified ras!", enable ? "Enable" : "Disable");
+ return ras_core_set_status(ras_mgr->ras_core, enable);
+}
+
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return ras_core_is_enabled(ras_mgr->ras_core);
+}
+
+static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready &&
+ ras_core_is_ready(ras_mgr->ras_core))
+ return true;
+
+ return false;
+}
+
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_handle_nbio_irq(ras_mgr->ras_core, data);
+}
+
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+ uint64_t seq_no;
+
+ if (!amdgpu_ras_mgr_is_ready(adev) ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return 0;
+
+ seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type);
+
+ if ((seqno_type == RAS_SEQNO_TYPE_DE) ||
+ (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) {
+ ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no);
+ if (ret)
+ RAS_DEV_WARN(adev, "There are too many ras interrupts!");
+ }
+
+ return seq_no;
+}
+
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ uint64_t seq_no = 0;
+ int ret = 0;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) {
+ if (ras_mgr->ras_core->poison_supported) {
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE);
+ RAS_DEV_INFO(adev,
+ "{%llu} RAS poison is created, no user action is needed.\n",
+ seq_no);
+ }
+
+ ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info);
+ } else if (ras_mgr->ras_core->poison_supported) {
+ ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info);
+ } else {
+ RAS_DEV_WARN(adev,
+ "No RAS interrupt handler for non-UMC block with poison disabled.\n");
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return amdgpu_ras_process_handle_consumption_interrupt(adev, data);
+}
+
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_update_ecc_info(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ con->gpu_reset_flags |= flags;
+ return amdgpu_ras_reset_gpu(adev);
+}
+
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_eeprom_check_safety_watermark(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev,
+ uint32_t *nps_mode)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ uint32_t mode;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EINVAL;
+
+ mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core);
+ if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE)
+ return -EINVAL;
+
+ *nps_mode = mode;
+
+ return 0;
+}
+
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_umc_check_retired_addr(ras_mgr->ras_core, addr);
+}
+
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready)
+ return false;
+
+ return ras_core_gpu_is_rma(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_cmd_ctx *cmd_ctx;
+ uint32_t ctx_buf_size = PAGE_SIZE;
+ int ret;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL);
+ if (!cmd_ctx)
+ return -ENOMEM;
+
+ cmd_ctx->cmd_id = cmd_id;
+
+ memcpy(cmd_ctx->input_buff_raw, input, input_size);
+ cmd_ctx->input_size = input_size;
+ cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx);
+
+ ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx);
+ if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size))
+ memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size);
+
+ kfree(cmd_ctx);
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev)) {
+ RAS_DEV_ERR(adev, "Invalid ras pre-reset!\n");
+ return -EPERM;
+ }
+
+ amdgpu_ras_process_pre_reset(adev);
+ return 0;
+}
+
+int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev)) {
+ RAS_DEV_ERR(adev, "Invalid ras post-reset!\n");
+ return -EPERM;
+ }
+
+ amdgpu_ras_process_post_reset(adev);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
new file mode 100644
index 000000000000..8fb7eb4b8f13
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_MGR_H__
+#define __AMDGPU_RAS_MGR_H__
+#include "ras.h"
+#include "amdgpu_ras_process.h"
+
+enum ras_ih_type {
+ RAS_IH_NONE,
+ RAS_IH_FROM_BLOCK_CONTROLLER,
+ RAS_IH_FROM_CONSUMER_CLIENT,
+ RAS_IH_FROM_FATAL_ERROR,
+};
+
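+/* Interrupt payload: either a raw IV entry (block-controller interrupts)
+ * or a pasid/reset tuple (consumer-client interrupts).
+ */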
+struct ras_ih_info {
+ uint32_t block;
+ union {
+ struct amdgpu_iv_entry iv_entry;
+ struct {
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+ };
+ };
+};
+
+struct amdgpu_ras_mgr {
+ struct amdgpu_device *adev;
+ struct ras_core_context *ras_core;
+ struct delayed_work retire_page_dwork;
+ struct ras_event_manager ras_event_mgr;
+ uint64_t last_poison_consumption_seqno;
+ bool ras_is_ready;
+
+ bool is_paused;
+ struct completion ras_event_done;
+};
+
+extern const struct amdgpu_ip_block_version ras_v1_0_ip_block;
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(
+ struct amdgpu_device *adev);
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable);
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags);
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type);
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode);
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size);
+int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
new file mode 100644
index 000000000000..79a51b1603ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_smu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core,
+ u32 msg, u32 *count)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ u32 smu_msg;
+ int ret = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ?
+ SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ if (ret)
+ *count = 0;
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t data[2] = {0, 0};
+ uint32_t param;
+ int ret = 0;
+ int i, offset;
+ u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ?
+ SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ offset = reg_idx * 8;
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
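+ /* param: bank index in the upper 16 bits, dword-aligned byte offset in the lower 16 */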
+ param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc);
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n",
+ reg_idx, offset);
+ break;
+ }
+ }
+ up_read(&adev->reset_domain->sem);
+
+ if (!ret)
+ *val = (uint64_t)data[1] << 32 | data[0];
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
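+/* MP1 backend hooks; plugged into the RAS core via ras_mp1_config.mp1_sys_fn */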
+const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = {
+ .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count,
+ .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
new file mode 100644
index 000000000000..71c614ae1ae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_MP1_V13_0_H__
+#define __AMDGPU_RAS_MP1_V13_0_H__
+#include "ras.h"
+
+extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
new file mode 100644
index 000000000000..2783f5875c7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+#include "nbio/nbio_7_9_0_offset.h"
+#include "nbio/nbio_7_9_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+
+static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+	/* Dummy function; no interrupt state initialization is needed in the driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the IH cookie for the ras_controller_irq should be written
+	 * to the BIF ring instead of the general IV ring. However, due to a known
+	 * BIF ring HW bug, it has to be disabled. The process function will
+	 * therefore never be invoked, so leave it as a dummy.
+	 */
+ return 0;
+}
+
+static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+	/* Dummy function; no interrupt state initialization is needed in the driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the IH cookie for the err_event_athub_irq should be written
+	 * to the BIF ring instead of the general IV ring. However, due to a known
+	 * BIF ring HW bug, it has to be disabled. The process function will
+	 * therefore never be invoked, so leave it as a dummy.
+	 */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
+ .set = nbio_v7_9_set_ras_controller_irq_state,
+ .process = nbio_v7_9_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_9_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_9_process_err_event_athub_irq,
+};
+
+static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_9_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+
+ return r;
+}
+
+static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core,
+ bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_9_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
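+/*
+ * Despite the "set ... irq state" hook names, both callbacks only register
+ * the interrupt sources; the state argument is currently unused.
+ */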
+const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = {
+ .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt,
+ .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
new file mode 100644
index 000000000000..272259e9a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RAS_NBIO_V7_9_H__
+#define __AMDGPU_RAS_NBIO_V7_9_H__
+
+extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
new file mode 100644
index 000000000000..5782c007de71
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_process.h"
+
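+/* Both values below are in milliseconds */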
+#define RAS_MGR_RETIRE_PAGE_INTERVAL 100
+#define RAS_EVENT_PROCESS_TIMEOUT 1200
+
+static void ras_process_retire_page_dwork(struct work_struct *work)
+{
+ struct amdgpu_ras_mgr *ras_mgr =
+ container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work);
+ struct amdgpu_device *adev = ras_mgr->adev;
+ int ret;
+
+ if (amdgpu_ras_is_rma(adev))
+ return;
+
+ /* If gpu reset is ongoing, delay retiring the bad pages */
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3));
+ return;
+ }
+
+ ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL);
+ if (!ret)
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL));
+}
+
+int amdgpu_ras_process_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ ras_mgr->is_paused = false;
+ init_completion(&ras_mgr->ras_event_done);
+
+ INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork);
+
+ return 0;
+}
+
+int amdgpu_ras_process_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ ras_mgr->is_paused = false;
+ /* Save all cached bad pages to eeprom */
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ cancel_delayed_work_sync(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr->ras_core)
+ return -EINVAL;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true);
+}
+
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data)
+{
+ amdgpu_ras_set_fed(adev, true);
+ return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET);
+}
+
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ struct ras_event_req req;
+ uint64_t seqno;
+
+ if (!ih_info)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.block = ih_info->block;
+ req.data = ih_info->data;
+ req.pasid = ih_info->pasid;
+ req.pasid_fn = ih_info->pasid_fn;
+ req.reset = ih_info->reset;
+
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+
+	/* If the ACA registers cannot be read from the FW, the poison
+	 * consumption seqno is never popped from the fifo, so it is
+	 * necessary to check whether this seqno matches the previous one.
+	 */
+ if (seqno == ras_mgr->last_poison_consumption_seqno) {
+ /* Pop and discard the previous seqno */
+ ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+ }
+ ras_mgr->last_poison_consumption_seqno = seqno;
+ req.seqno = seqno;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false);
+}
+
+int amdgpu_ras_process_begin(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr->is_paused)
+ return -EAGAIN;
+
+ reinit_completion(&ras_mgr->ras_event_done);
+ return 0;
+}
+
+int amdgpu_ras_process_end(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ complete(&ras_mgr->ras_event_done);
+ return 0;
+}
+
+int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ long rc;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ if (!ras_mgr->ras_core->is_initialized)
+ return -EPERM;
+
+ ras_mgr->is_paused = true;
+
+ /* Wait for RAS event processing to complete */
+ rc = wait_for_completion_interruptible_timeout(&ras_mgr->ras_event_done,
+ msecs_to_jiffies(RAS_EVENT_PROCESS_TIMEOUT));
+ if (rc <= 0)
+		RAS_DEV_WARN(adev, "Wait for RAS event processing %s\n",
+			rc ? "was interrupted" : "timed out");
+
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_post_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ if (!ras_mgr->ras_core->is_initialized)
+ return -EPERM;
+
+ ras_mgr->is_paused = false;
+
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
new file mode 100644
index 000000000000..d55cdaeac441
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_PROCESS_H__
+#define __AMDGPU_RAS_PROCESS_H__
+#include "ras_process.h"
+#include "amdgpu_ras_mgr.h"
+
+enum ras_ih_type;
+int amdgpu_ras_process_init(struct amdgpu_device *adev);
+int amdgpu_ras_process_fini(struct amdgpu_device *adev);
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_begin(struct amdgpu_device *adev);
+int amdgpu_ras_process_end(struct amdgpu_device *adev);
+int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev);
+int amdgpu_ras_process_post_reset(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
new file mode 100644
index 000000000000..45ed8c3b5563
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+			"Socket id (%d) is out of range! max:%u\n",
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+			"Die id (%d) is out of range! max:%u\n",
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+			"Xcd id (%d) is out of range! max:%u\n",
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+
+static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+ uint64_t seq_no;
+
+ ret = amdgpu_ras_global_ras_isr(adev);
+ if (ret)
+ return ret;
+
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE);
+ RAS_DEV_INFO(adev,
+ "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n",
+ seq_no);
+
+ return amdgpu_ras_process_handle_unexpected_interrupt(adev, data);
+}
+
+static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core,
+ void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_event_req *req = (struct ras_event_req *)data;
+ pasid_notify pasid_fn;
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->pasid_fn) {
+ pasid_fn = (pasid_notify)req->pasid_fn;
+ pasid_fn(adev, req->pasid, req->data);
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct ras_event_state *event_state;
+ struct amdgpu_hive_info *hive;
+ enum ras_event_type event_type;
+ uint64_t seq_no;
+
+ if (!ras_mgr || !seqno ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return -EINVAL;
+
+ switch (seqno_type) {
+ case RAS_SEQNO_TYPE_UE:
+ event_type = RAS_EVENT_TYPE_FATAL;
+ break;
+ case RAS_SEQNO_TYPE_CE:
+ case RAS_SEQNO_TYPE_DE:
+ event_type = RAS_EVENT_TYPE_POISON_CREATION;
+ break;
+ case RAS_SEQNO_TYPE_POISON_CONSUMPTION:
+ event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
+ break;
+ default:
+ event_type = RAS_EVENT_TYPE_INVALID;
+ break;
+ }
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+ event_state = &event_mgr->event_state[event_type];
+ if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) {
+ seq_no = event_state->last_seqno;
+ } else {
+ seq_no = atomic64_inc_return(&event_mgr->seqno);
+ event_state->last_seqno = seq_no;
+ atomic64_inc(&event_state->count);
+ }
+ amdgpu_put_xgmi_hive(hive);
+
+ *seqno = seq_no;
+ return 0;
+}
+
+static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev);
+ int ret = 0;
+
+ switch (event_id) {
+ case RAS_EVENT_ID__BAD_PAGE_DETECTED:
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ break;
+ case RAS_EVENT_ID__POISON_CONSUMPTION:
+ amdgpu_ras_sys_poison_consumption_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__RESERVE_BAD_PAGE:
+ ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data);
+ break;
+ case RAS_EVENT_ID__FATAL_ERROR_DETECTED:
+ ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM:
+ ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP:
+ ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__DEVICE_RMA:
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL);
+ ret = amdgpu_dpm_send_rma_reason(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RESET_GPU:
+ ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN:
+ ret = amdgpu_ras_process_begin(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RAS_EVENT_PROC_END:
+ ret = amdgpu_ras_process_end(ras_core->dev);
+ break;
+ default:
+		RAS_DEV_WARN(ras_core->dev, "Unhandled ras notify event: %d\n", event_id);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ return ktime_get_real_seconds();
+}
+
+static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core,
+ uint32_t *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t gpu_status = 0;
+
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev))
+ gpu_status |= RAS_GPU_STATUS__IN_RESET;
+
+ if (amdgpu_sriov_vf(adev))
+ gpu_status |= RAS_GPU_STATUS__IS_VF;
+
+ *status = gpu_status;
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+
+ dev_info->device_id = adev->pdev->device;
+ dev_info->vendor_id = adev->pdev->vendor;
+ dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core,
+ bool down, bool try)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret = 0;
+
+ if (down && try)
+ ret = down_read_trylock(&adev->reset_domain->sem);
+ else if (down)
+ down_read(&adev->reset_domain->sem);
+ else
+ up_read(&adev->reset_domain->sem);
+
+ return ret;
+}
+
+static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct psp_context *psp = &adev->psp;
+ struct psp_ring *psp_ring;
+ struct ta_mem_context *mem_ctx;
+
+ if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) {
+ psp_ring = &psp->km_ring;
+ gpu_mem->mem_bo = adev->firmware.rbuf;
+ gpu_mem->mem_size = psp_ring->ring_size;
+ gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr;
+ gpu_mem->mem_cpu_addr = psp_ring->ring_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) {
+ gpu_mem->mem_bo = psp->cmd_buf_bo;
+ gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->cmd_buf_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) {
+ gpu_mem->mem_bo = psp->fence_buf_bo;
+ gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fence_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) {
+ gpu_mem->mem_bo = psp->fw_pri_bo;
+ gpu_mem->mem_size = PSP_1_MEG;
+ gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fw_pri_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) {
+ mem_ctx = &psp->ras_context.context.mem_context;
+ gpu_mem->mem_bo = mem_ctx->shared_bo;
+ gpu_mem->mem_size = mem_ctx->shared_mem_size;
+ gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr;
+ gpu_mem->mem_cpu_addr = mem_ctx->shared_buf;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!gpu_mem->mem_bo || !gpu_mem->mem_size ||
+ !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) {
+ RAS_DEV_ERR(ras_core->dev, "The ras psp gpu memory is invalid!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ return 0;
+}
+
+const struct ras_sys_func amdgpu_ras_sys_fn = {
+ .ras_notifier = amdgpu_ras_sys_event_notifier,
+ .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp,
+ .gen_seqno = amdgpu_ras_sys_gen_seqno,
+ .check_gpu_status = amdgpu_ras_sys_check_gpu_status,
+ .get_device_system_info = amdgpu_ras_sys_get_device_system_info,
+ .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock,
+ .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt,
+ .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem,
+ .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
new file mode 100644
index 000000000000..8156531a7b63
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_SYS_H__
+#define __RAS_SYS_H__
+#include <linux/stdarg.h>
+#include <linux/printk.h>
+#include <linux/dev_printk.h>
+#include <linux/mempool.h>
+#include "amdgpu.h"
+
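+/*
+ * Device-aware logging wrappers: log through dev_*() when a device context
+ * is available and fall back to plain printk otherwise, e.g.
+ *	RAS_DEV_ERR(adev, "ACA failed to read register[%d]\n", reg_idx);
+ */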
+#define RAS_DEV_ERR(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+			pr_err(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_WARN(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+			pr_warn(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_INFO(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+			pr_info(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_DBG(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+			pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_INFO(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
+
+#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ 0, ip##_HWIP, inst); \
+})
+
+#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
+ value, 0, ip##_HWIP, inst); \
+})
+
+/* GET_INST returns the physical instance corresponding to a logical instance */
+#define RAS_GET_INST(dev, ip, inst) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ adev->ip_map.logical_to_dev_inst ? \
+ adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \
+})
+
+#define RAS_GET_MASK(dev, ip, mask) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ (adev->ip_map.logical_to_dev_mask ? \
+ adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \
+})
+
+static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter)
+{
+ return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index);
+}
+
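+/*
+ * Type-erased wrapper around wait_event_interruptible_timeout(); wq_head
+ * must point to a wait_queue_head_t. Returns the remaining jiffies on
+ * success, 0 on timeout, or -ERESTARTSYS if interrupted.
+ */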
+static inline long ras_wait_event_interruptible_timeout(void *wq_head,
+ int (*condition)(void *param), void *param, unsigned int timeout)
+{
+ return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head,
+ condition(param), timeout);
+}
+
+extern const struct ras_sys_func amdgpu_ras_sys_fn;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile
index e69de29bb2d1..e826a1f86424 100644
--- a/drivers/gpu/drm/amd/ras/rascore/Makefile
+++ b/drivers/gpu/drm/amd/ras/rascore/Makefile
@@ -0,0 +1,44 @@
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+RAS_CORE_FILES = ras_core.o \
+ ras_mp1.o \
+ ras_mp1_v13_0.o \
+ ras_aca.o \
+ ras_aca_v1_0.o \
+ ras_eeprom.o \
+ ras_umc.o \
+ ras_umc_v12_0.o \
+ ras_cmd.o \
+ ras_gfx.o \
+ ras_gfx_v9_0.o \
+ ras_process.o \
+ ras_nbio.o \
+ ras_nbio_v7_9.o \
+ ras_log_ring.o \
+ ras_cper.o \
+ ras_psp.o \
+ ras_psp_v13_0.o
+
+RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_CORE)
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h
new file mode 100644
index 000000000000..3396b2e0949d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_H__
+#define __RAS_H__
+#include "ras_sys.h"
+#include "ras_umc.h"
+#include "ras_aca.h"
+#include "ras_eeprom.h"
+#include "ras_core_status.h"
+#include "ras_process.h"
+#include "ras_gfx.h"
+#include "ras_cmd.h"
+#include "ras_nbio.h"
+#include "ras_mp1.h"
+#include "ras_psp.h"
+#include "ras_log_ring.h"
+
+#define RAS_HW_ERR "[Hardware Error]: "
+
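+/* RAS bookkeeping tracks bad memory at 4 KiB GPU page granularity */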
+#define RAS_GPU_PAGE_SHIFT 12
+#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT)
+#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT)
+
+#define RAS_CORE_RESET_GPU 0x10000
+
+#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001)
+#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002)
+#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004)
+
+enum ras_block_id {
+ RAS_BLOCK_ID__UMC = 0,
+ RAS_BLOCK_ID__SDMA,
+ RAS_BLOCK_ID__GFX,
+ RAS_BLOCK_ID__MMHUB,
+ RAS_BLOCK_ID__ATHUB,
+ RAS_BLOCK_ID__PCIE_BIF,
+ RAS_BLOCK_ID__HDP,
+ RAS_BLOCK_ID__XGMI_WAFL,
+ RAS_BLOCK_ID__DF,
+ RAS_BLOCK_ID__SMN,
+ RAS_BLOCK_ID__SEM,
+ RAS_BLOCK_ID__MP0,
+ RAS_BLOCK_ID__MP1,
+ RAS_BLOCK_ID__FUSE,
+ RAS_BLOCK_ID__MCA,
+ RAS_BLOCK_ID__VCN,
+ RAS_BLOCK_ID__JPEG,
+ RAS_BLOCK_ID__IH,
+ RAS_BLOCK_ID__MPIO,
+
+ RAS_BLOCK_ID__LAST
+};
+
+enum ras_ecc_err_type {
+ RAS_ECC_ERR__NONE = 0,
+ RAS_ECC_ERR__PARITY = 1,
+ RAS_ECC_ERR__SINGLE_CORRECTABLE = 2,
+ RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4,
+ RAS_ECC_ERR__POISON = 8,
+};
+
+enum ras_err_type {
+ RAS_ERR_TYPE__UE = 0,
+ RAS_ERR_TYPE__CE,
+ RAS_ERR_TYPE__DE,
+ RAS_ERR_TYPE__LAST
+};
+
+enum ras_seqno_type {
+ RAS_SEQNO_TYPE_INVALID = 0,
+ RAS_SEQNO_TYPE_UE,
+ RAS_SEQNO_TYPE_CE,
+ RAS_SEQNO_TYPE_DE,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION,
+ RAS_SEQNO_TYPE_COUNT_MAX,
+};
+
+enum ras_seqno_fifo {
+ SEQNO_FIFO_INVALID = 0,
+ SEQNO_FIFO_POISON_CREATION,
+ SEQNO_FIFO_POISON_CONSUMPTION,
+ SEQNO_FIFO_COUNT_MAX
+};
+
+enum ras_notify_event {
+ RAS_EVENT_ID__NONE,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED,
+ RAS_EVENT_ID__POISON_CONSUMPTION,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE,
+ RAS_EVENT_ID__DEVICE_RMA,
+ RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED,
+ RAS_EVENT_ID__RESET_GPU,
+ RAS_EVENT_ID__RESET_VF,
+ RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN,
+ RAS_EVENT_ID__RAS_EVENT_PROC_END,
+};
+
+enum ras_gpu_status {
+ RAS_GPU_STATUS__NOT_READY = 0,
+ RAS_GPU_STATUS__READY = 0x1,
+ RAS_GPU_STATUS__IN_RESET = 0x2,
+ RAS_GPU_STATUS__IS_RMA = 0x4,
+ RAS_GPU_STATUS__IS_VF = 0x8,
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+struct ras_umc;
+struct ras_aca;
+struct ras_process;
+struct ras_nbio;
+struct ras_log_ring;
+struct ras_psp;
+
+struct ras_mp1_sys_func {
+ int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core,
+ u32 msg, u32 *count);
+ int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_eeprom_sys_func {
+ int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read);
+ int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio_sys_func {
+ int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+ int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+};
+
+struct ras_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ long tm_year;
+};
+
+struct device_system_info {
+ uint32_t device_id;
+ uint32_t vendor_id;
+ uint32_t socket_id;
+};
+
+enum gpu_mem_type {
+ GPU_MEM_TYPE_DEFAULT,
+ GPU_MEM_TYPE_RAS_PSP_RING,
+ GPU_MEM_TYPE_RAS_PSP_CMD,
+ GPU_MEM_TYPE_RAS_PSP_FENCE,
+ GPU_MEM_TYPE_RAS_TA_FW,
+ GPU_MEM_TYPE_RAS_TA_CMD,
+};
+
+struct ras_psp_sys_func {
+ int (*get_ras_psp_system_status)(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status);
+ int (*get_ras_ta_init_param)(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param);
+};
+
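+/*
+ * System-integration callbacks through which the RAS core reaches back into
+ * the driver; amdgpu implements these in amdgpu_ras_sys_fn.
+ */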
+struct ras_sys_func {
+ int (*gpu_reset_lock)(struct ras_core_context *ras_core,
+ bool down, bool try);
+ int (*check_gpu_status)(struct ras_core_context *ras_core,
+ uint32_t *status);
+ int (*gen_seqno)(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno);
+ int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data);
+ int (*ras_notifier)(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+ u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core);
+ int (*get_device_system_info)(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+ bool (*detect_ras_interrupt)(struct ras_core_context *ras_core);
+ int (*get_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+ int (*put_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+};
+
+struct ras_ecc_count {
+ uint64_t new_ce_count;
+ uint64_t total_ce_count;
+ uint64_t new_ue_count;
+ uint64_t total_ue_count;
+ uint64_t new_de_count;
+ uint64_t total_de_count;
+};
+
+struct ras_bank_ecc {
+ uint32_t nps;
+ uint64_t seq_no;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct ras_bank_ecc_node {
+ struct list_head node;
+ struct ras_bank_ecc ecc;
+};
+
+struct ras_aca_config {
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+};
+
+struct ras_mp1_config {
+ const struct ras_mp1_sys_func *mp1_sys_fn;
+};
+
+struct ras_nbio_config {
+ const struct ras_nbio_sys_func *nbio_sys_fn;
+};
+
+struct ras_psp_config {
+ const struct ras_psp_sys_func *psp_sys_fn;
+};
+
+struct ras_umc_config {
+ uint32_t umc_vram_type;
+};
+
+struct ras_eeprom_config {
+ const struct ras_eeprom_sys_func *eeprom_sys_fn;
+ int eeprom_record_threshold_config;
+ uint32_t eeprom_record_threshold_count;
+ void *eeprom_i2c_adapter;
+ u32 eeprom_i2c_addr;
+ u32 eeprom_i2c_port;
+ u16 max_i2c_read_len;
+ u16 max_i2c_write_len;
+};
+
+struct ras_core_config {
+ u32 aca_ip_version;
+ u32 umc_ip_version;
+ u32 mp1_ip_version;
+ u32 gfx_ip_version;
+ u32 nbio_ip_version;
+ u32 psp_ip_version;
+
+ bool poison_supported;
+ bool ras_eeprom_supported;
+ const struct ras_sys_func *sys_fn;
+
+ struct ras_aca_config aca_cfg;
+ struct ras_mp1_config mp1_cfg;
+ struct ras_nbio_config nbio_cfg;
+ struct ras_psp_config psp_cfg;
+ struct ras_eeprom_config eeprom_cfg;
+ struct ras_umc_config umc_cfg;
+};
+
+struct ras_core_context {
+ void *dev;
+ struct ras_core_config *config;
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+ int max_ue_banks_per_query;
+ int max_ce_banks_per_query;
+ struct ras_aca ras_aca;
+
+ bool ras_eeprom_supported;
+ struct ras_eeprom_control ras_eeprom;
+
+ struct ras_psp ras_psp;
+ struct ras_umc ras_umc;
+ struct ras_nbio ras_nbio;
+ struct ras_gfx ras_gfx;
+ struct ras_mp1 ras_mp1;
+ struct ras_process ras_proc;
+ struct ras_cmd_mgr ras_cmd;
+ struct ras_log_ring ras_log_ring;
+
+ const struct ras_sys_func *sys_fn;
+
+ /* is poison mode supported */
+ bool poison_supported;
+
+ bool is_rma;
+ bool is_initialized;
+
+ struct kfifo de_seqno_fifo;
+ struct kfifo consumption_seqno_fifo;
+ spinlock_t seqno_lock;
+
+ bool ras_core_enabled;
+};
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config);
+void ras_core_destroy(struct ras_core_context *ras_core);
+int ras_core_sw_init(struct ras_core_context *ras_core);
+int ras_core_sw_fini(struct ras_core_context *ras_core);
+int ras_core_hw_init(struct ras_core_context *ras_core);
+int ras_core_hw_fini(struct ras_core_context *ras_core);
+bool ras_core_is_ready(struct ras_core_context *ras_core);
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type);
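+/* Peek (pop == false) or consume (pop == true) the next pending seqno of
+ * the given type.
+ */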
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop);
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno);
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core);
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count);
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core);
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data);
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core);
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core);
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id);
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm);
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable);
+bool ras_core_is_enabled(struct ras_core_context *ras_core);
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core);
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core);
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core);
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core);
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
new file mode 100644
index 000000000000..e433c70d2989
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_aca_v1_0.h"
+#include "ras_mp1_v13_0.h"
+
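+/*
+ * ue_updated_mark flags: a fatal error sets ACA_MARK_FATAL_FLAG, and the
+ * first UE bank read afterwards sets ACA_MARK_UE_READ_FLAG so that duplicate
+ * UE updates during fatal handling are skipped
+ * (see aca_dup_update_ue_in_fatal()).
+ */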
+#define ACA_MARK_FATAL_FLAG 0x100
+#define ACA_MARK_UE_READ_FLAG 0x1
+
+#define blk_name(block_id) ras_core_get_ras_block_name(block_id)
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX__CTL},
+ {"STATUS", ACA_REG_IDX__STATUS},
+ {"ADDR", ACA_REG_IDX__ADDR},
+ {"MISC", ACA_REG_IDX__MISC0},
+ {"CONFIG", ACA_REG_IDX__CONFG},
+ {"IPID", ACA_REG_IDX__IPID},
+ {"SYND", ACA_REG_IDX__SYND},
+ {"DESTAT", ACA_REG_IDX__DESTAT},
+ {"DEADDR", ACA_REG_IDX__DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK},
+};
+
+static void aca_report_ecc_info(struct ras_core_context *ras_core,
+ u64 seq_no, u32 blk, u32 skt, u32 aid,
+ struct aca_aid_ecc *aid_ecc,
+ struct aca_bank_ecc *new_ecc)
+{
+ struct aca_ecc_count ecc_count = {0};
+
+ ecc_count.new_ue_count = new_ecc->ue_count;
+ ecc_count.new_de_count = new_ecc->de_count;
+ ecc_count.new_ce_count = new_ecc->ce_count;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ struct aca_ecc_count *xcd_ecc;
+ int xcd_id;
+
+ for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) {
+ xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err;
+ ecc_count.total_ue_count += xcd_ecc->total_ue_count;
+ ecc_count.total_de_count += xcd_ecc->total_de_count;
+ ecc_count.total_ce_count += xcd_ecc->total_ce_count;
+ }
+ } else {
+ ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count;
+ ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count;
+ ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count;
+ }
+
+ if (ecc_count.new_ue_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ue_count, blk_name(blk));
+ }
+
+ if (ecc_count.new_de_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ }
+
+ if (ecc_count.new_ce_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk));
+ }
+}
+
+static void aca_bank_log(struct ras_core_context *ras_core,
+ int idx, int total, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ int i;
+
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n",
+ bank->seq_no);
+	/* idx is zero-based; print idx + 1 so the log reads e.g. ACA[08/08] */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ bank->seq_no, idx + 1, total,
+ aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static void aca_log_bank_data(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc,
+ struct ras_log_batch_tag *batch)
+{
+ if (bank_ecc->ue_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch);
+ else if (bank_ecc->de_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch);
+ else
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch);
+}
+
+static int aca_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ return ras_mp1_get_bank_count(ras_core, type, count);
+}
+
+static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank)
+{
+ const struct aca_bank_hw_ops *bank_ops;
+
+ if (!aca_blk->blk_info)
+ return false;
+
+ bank_ops = &aca_blk->blk_info->bank_ops;
+ if (!bank_ops->bank_match)
+ return false;
+
+ return bank_ops->bank_match(aca_blk, bank);
+}
+
+static int aca_parse_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk,
+ struct aca_bank_reg *bank,
+ struct aca_bank_ecc *ecc)
+{
+ const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops;
+
+ if (!bank_ops || !bank_ops->bank_parse)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc);
+}
+
+static int aca_check_block_ecc_info(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_ecc_info *info)
+{
+ if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Socket id (%d) is out of config! max:%u\n",
+ info->socket_id, aca_blk->ecc.socket_num_per_hive);
+ return -ENODATA;
+ }
+
+ if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Die id (%d) is out of config! max:%u\n",
+ info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num);
+ return -ENODATA;
+ }
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) &&
+ (info->xcd_id >=
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Xcd id (%d) is out of config! max:%u\n",
+ info->xcd_id,
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int aca_log_bad_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ struct aca_ecc_info *info;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+ int ret;
+
+ info = &bank_ecc->bank_info;
+
+ ret = aca_check_block_ecc_info(ras_core, aca_blk, info);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id];
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)
+ ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err;
+ else
+ ecc_err = &aid_ecc->ecc_err;
+
+ ecc_err->new_ce_count += bank_ecc->ce_count;
+ ecc_err->total_ce_count += bank_ecc->ce_count;
+ ecc_err->new_ue_count += bank_ecc->ue_count;
+ ecc_err->total_ue_count += bank_ecc->ue_count;
+ ecc_err->new_de_count += bank_ecc->de_count;
+ ecc_err->total_de_count += bank_ecc->de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) &&
+ bank_ecc->de_count) {
+ struct ras_bank_ecc ras_ecc = {0};
+
+ ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core);
+ ras_ecc.addr = bank_ecc->bank_info.addr;
+ ras_ecc.ipid = bank_ecc->bank_info.ipid;
+ ras_ecc.status = bank_ecc->bank_info.status;
+ ras_ecc.seq_no = bank->seq_no;
+
+ if (ras_core_gpu_in_reset(ras_core))
+ ras_umc_log_bad_bank_pending(ras_core, &ras_ecc);
+ else
+ ras_umc_log_bad_bank(ras_core, &ras_ecc);
+ }
+
+ aca_report_ecc_info(ras_core,
+ bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id,
+ &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc);
+
+ return 0;
+}
+
+static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank)
+{
+ int i = 0;
+
+ for (i = 0; i < RAS_BLOCK_ID__LAST; i++)
+ if (aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank))
+ return &ras_core->ras_aca.aca_blk[i];
+
+ return NULL;
+}
+
+static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type,
+ int idx, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ int i, ret, reg_cnt;
+
+ reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs));
+ for (i = 0; i < reg_cnt; i++) {
+ ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core,
+ enum ras_err_type err_type, struct aca_block *aca_blk,
+ struct aca_bank_ecc *bank_ecc)
+{
+ uint64_t seq_no = 0;
+
+ if (bank_ecc->de_count) {
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC)
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true);
+ else
+ seq_no = ras_core_get_seqno(ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ } else if (bank_ecc->ue_count) {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true);
+ } else {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true);
+ }
+
+ return seq_no;
+}
+
+static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core,
+ u32 ecc_type)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (ecc_type != RAS_ERR_TYPE__UE)
+ return false;
+
+ if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) {
+ if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)
+ return true;
+
+ aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG;
+ }
+
+ return false;
+}
+
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (!aca)
+ return;
+
+ aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG;
+}
+
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (!aca)
+ return;
+
+ if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) &&
+ (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG))
+ aca->ue_updated_mark = 0;
+}
+
+static int aca_banks_update(struct ras_core_context *ras_core,
+ u32 ecc_type, void *data)
+{
+ struct aca_bank_reg bank;
+ struct aca_block *aca_blk;
+ struct aca_bank_ecc bank_ecc;
+ struct ras_log_batch_tag *batch_tag = NULL;
+ u32 count = 0;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ras_core->ras_aca.bank_op_lock);
+
+ if (aca_dup_update_ue_in_fatal(ras_core, ecc_type))
+ goto out;
+
+ ret = aca_get_bank_count(ras_core, ecc_type, &count);
+ if (ret)
+ goto out;
+
+ if (!count)
+ goto out;
+
+ batch_tag = ras_log_ring_create_batch_tag(ras_core);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = aca_dump_bank(ras_core, ecc_type, i, &bank);
+ if (ret)
+ break;
+
+ bank.ecc_type = ecc_type;
+
+ memset(&bank_ecc, 0, sizeof(bank_ecc));
+ aca_blk = aca_get_bank_aca_block(ras_core, &bank);
+ if (aca_blk)
+ ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc);
+
+ aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag);
+ aca_bank_log(ras_core, i, count, &bank, &bank_ecc);
+
+ if (!ret && aca_blk)
+ ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ if (ret)
+ break;
+ }
+ ras_log_ring_destroy_batch_tag(ras_core, batch_tag);
+
+out:
+ mutex_unlock(&ras_core->ras_aca.bank_op_lock);
+ return ret;
+}
+
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data)
+{
+	/* Update the aca banks into the per-block error cache first */
+ return aca_banks_update(ras_core, type, data);
+}
+
+static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk)
+{
+ return &ras_core->ras_aca.aca_blk[blk];
+}
+
+static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ struct aca_aid_ecc *aid_ecc;
+ int skt, aid, xcd;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++)
+ memset(&aid_ecc->xcd.xcd[xcd],
+ 0, sizeof(struct aca_xcd_ecc));
+ } else {
+ memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err));
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core)
+{
+ enum ras_block_id blk;
+ int ret = 0;
+
+ for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) {
+ ret = ras_aca_clear_block_ecc_count(ras_core, blk);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) {
+ ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ } else {
+ ecc_err = &aid_ecc->ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, u32 skt, u32 aid, u32 xcd,
+ struct aca_ecc_count *ecc_count)
+{
+ struct aca_block *aca_blk;
+ struct aca_ecc_count *ecc_err;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ if (blk == RAS_BLOCK_ID__GFX)
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err;
+ else
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err;
+
+ ecc_count->new_ce_count = ecc_err->new_ce_count;
+ ecc_count->total_ce_count = ecc_err->total_ce_count;
+ ecc_count->new_ue_count = ecc_err->new_ue_count;
+ ecc_count->total_ue_count = ecc_err->total_ue_count;
+ ecc_count->new_de_count = ecc_err->new_de_count;
+ ecc_count->total_de_count = ecc_err->total_de_count;
+
+ return 0;
+}
+
+static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src)
+{
+ des->new_ce_count += src->new_ce_count;
+ des->total_ce_count += src->total_ce_count;
+ des->new_ue_count += src->new_ue_count;
+ des->total_ue_count += src->total_ue_count;
+ des->new_de_count += src->new_de_count;
+ des->total_de_count += src->total_de_count;
+}
+
+static const struct ras_aca_ip_func *aca_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(1, 0, 0):
+ return &ras_aca_func_v1_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "ACA ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, void *data)
+{
+ struct ras_ecc_count *err_data = (struct ras_ecc_count *)data;
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count ecc_xcd;
+ struct aca_ecc_count ecc_aid;
+ struct aca_ecc_count ecc;
+
+ if (blk >= RAS_BLOCK_ID__LAST)
+ return -EINVAL;
+
+ if (!err_data)
+ return -EINVAL;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ memset(&ecc, 0, sizeof(ecc));
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ for (xcd = 0;
+ xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num;
+ xcd++) {
+ memset(&ecc_xcd, 0, sizeof(ecc_xcd));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, xcd, &ecc_xcd))
+ continue;
+ _add_ecc_count(&ecc_aid, &ecc_xcd);
+ }
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ } else {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, 0, &ecc_aid))
+ continue;
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ }
+
+ err_data->new_ce_count = ecc.new_ce_count;
+ err_data->total_ce_count = ecc.total_ce_count;
+ err_data->new_ue_count = ecc.new_ue_count;
+ err_data->total_ue_count = ecc.total_ue_count;
+ err_data->new_de_count = ecc.new_de_count;
+ err_data->total_de_count = ecc.total_de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg;
+ struct aca_block *aca_blk;
+ uint32_t socket_num_per_hive;
+ uint32_t aid_num_per_socket;
+ uint32_t xcd_num_per_aid;
+ int blk, skt, aid;
+
+ socket_num_per_hive = aca_cfg->socket_num_per_hive;
+ aid_num_per_socket = aca_cfg->aid_num_per_socket;
+ xcd_num_per_aid = aca_cfg->xcd_num_per_aid;
+
+ if (!socket_num_per_hive || !aid_num_per_socket || !xcd_num_per_aid ||
+ (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) ||
+ (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) ||
+ (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %u, %u, %u\n",
+ socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid);
+ return -EINVAL;
+ }
+
+ memset(ras_aca, 0, sizeof(*ras_aca));
+
+ for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) {
+ aca_blk = &ras_aca->aca_blk[blk];
+ aca_blk->ecc.socket_num_per_hive = socket_num_per_hive;
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++)
+ aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num =
+ xcd_num_per_aid;
+ }
+ }
+ }
+
+ mutex_init(&ras_aca->aca_lock);
+ mutex_init(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ mutex_destroy(&ras_aca->aca_lock);
+ mutex_destroy(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct aca_block *aca_blk;
+ const struct ras_aca_ip_func *ip_func;
+ int i;
+
+ ras_aca->aca_ip_version = ras_core->config->aca_ip_version;
+ ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version);
+ if (!ip_func)
+ return -EINVAL;
+
+ for (i = 0; i < ip_func->block_num; i++) {
+ aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id];
+ aca_blk->blk_info = ip_func->block_info[i];
+ }
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
+
+int ras_aca_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
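For reference, a minimal caller-side sketch of the ACA lifecycle exposed by this file, assuming a fully configured ras_core_context (config->aca_cfg and config->aca_ip_version populated); the interrupt plumbing that would normally trigger ras_aca_update_ecc() is elided, and example_aca_query() is a hypothetical name:

    /* Illustrative only: drives sw_init -> hw_init -> update -> query -> fini
     * using the entry points declared in ras_aca.h.
     */
    static int example_aca_query(struct ras_core_context *ras_core)
    {
            struct ras_ecc_count err_data;
            int ret;

            ret = ras_aca_sw_init(ras_core);
            if (ret)
                    return ret;

            ret = ras_aca_hw_init(ras_core);
            if (ret)
                    goto sw_fini;

            /* Harvest pending UE banks into the per-block error cache. */
            ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL);
            if (ret)
                    goto hw_fini;

            memset(&err_data, 0, sizeof(err_data));
            ret = ras_aca_get_block_ecc_count(ras_core, RAS_BLOCK_ID__UMC, &err_data);
            if (!ret)
                    /* RAS_DEV_ERR is the only log macro visible here; used for illustration */
                    RAS_DEV_ERR(ras_core->dev, "UMC: %u new UE, %u total UE\n",
                                err_data.new_ue_count, err_data.total_ue_count);

    hw_fini:
            ras_aca_hw_fini(ras_core);
    sw_fini:
            ras_aca_sw_fini(ras_core);
            return ret;
    }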
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
new file mode 100644
index 000000000000..f61b02a5f0fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_H__
+#define __RAS_ACA_H__
+#include "ras.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+#define MAX_ACA_RAS_BLOCK 20
+
+#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE)
+#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE)
+#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE)
+
+enum ras_aca_reg_idx {
+ ACA_REG_IDX__CTL = 0,
+ ACA_REG_IDX__STATUS = 1,
+ ACA_REG_IDX__ADDR = 2,
+ ACA_REG_IDX__MISC0 = 3,
+ ACA_REG_IDX__CONFG = 4,
+ ACA_REG_IDX__IPID = 5,
+ ACA_REG_IDX__SYND = 6,
+ ACA_REG_IDX__DESTAT = 8,
+ ACA_REG_IDX__DEADDR = 9,
+ ACA_REG_IDX__CTL_MASK = 10,
+ ACA_REG_MAX_COUNT = 16,
+};
+
+struct ras_core_context;
+struct aca_block;
+
+struct aca_bank_reg {
+ u32 ecc_type;
+ u64 seq_no;
+ u64 regs[ACA_REG_MAX_COUNT];
+};
+
+enum aca_ecc_hwip {
+ ACA_ECC_HWIP__UNKNOWN = -1,
+ ACA_ECC_HWIP__PSP = 0,
+ ACA_ECC_HWIP__UMC,
+ ACA_ECC_HWIP__SMU,
+ ACA_ECC_HWIP__PCS_XGMI,
+ ACA_ECC_HWIP_COUNT,
+};
+
+struct aca_ecc_info {
+ int die_id;
+ int socket_id;
+ int xcd_id;
+ int hwid;
+ int mcatype;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct aca_bank_ecc {
+ struct aca_ecc_info bank_info;
+ u32 ce_count;
+ u32 ue_count;
+ u32 de_count;
+};
+
+struct aca_ecc_count {
+ u32 new_ce_count;
+ u32 total_ce_count;
+ u32 new_ue_count;
+ u32 total_ue_count;
+ u32 new_de_count;
+ u32 total_de_count;
+};
+
+struct aca_xcd_ecc {
+ struct aca_ecc_count ecc_err;
+};
+
+struct aca_aid_ecc {
+ union {
+ struct aca_xcd {
+ struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID];
+ u32 xcd_num;
+ } xcd;
+ struct aca_ecc_count ecc_err;
+ };
+};
+
+struct aca_socket_ecc {
+ struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET];
+ u32 aid_num;
+};
+
+struct aca_block_ecc {
+ struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE];
+ u32 socket_num_per_hive;
+};
+
+struct aca_bank_hw_ops {
+ bool (*bank_match)(struct aca_block *ras_blk, void *data);
+ int (*bank_parse)(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, void *data, void *buf);
+};
+
+struct aca_block_info {
+ char name[32];
+ u32 ras_block_id;
+ enum aca_ecc_hwip hwip;
+ struct aca_bank_hw_ops bank_ops;
+ u32 mask;
+};
+
+struct aca_block {
+ const struct aca_block_info *blk_info;
+ struct aca_block_ecc ecc;
+};
+
+struct ras_aca_ip_func {
+ uint32_t block_num;
+ const struct aca_block_info **block_info;
+};
+
+struct ras_aca {
+ uint32_t aca_ip_version;
+ const struct ras_aca_ip_func *ip_func;
+ struct mutex aca_lock;
+ struct mutex bank_op_lock;
+ struct aca_block aca_blk[MAX_ACA_RAS_BLOCK];
+ uint32_t ue_updated_mark;
+};
+
+int ras_aca_sw_init(struct ras_core_context *ras_core);
+int ras_aca_sw_fini(struct ras_core_context *ras_core);
+int ras_aca_hw_init(struct ras_core_context *ras_core);
+int ras_aca_hw_fini(struct ras_core_context *ras_core);
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data);
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk);
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core);
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data);
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core);
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core);
+#endif
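The union inside struct aca_aid_ecc is discriminated by block ID rather than by a tag field: GFX attributes counts per XCD, every other block keeps one aggregate counter per AID. A hypothetical accessor (example_aid_counts) makes the convention explicit; it mirrors the branch used by ras_aca_get_block_each_aid_ecc_count() in ras_aca.c:

    /* Sketch: resolve the active union member of aca_aid_ecc. */
    static struct aca_ecc_count *example_aid_counts(struct aca_aid_ecc *aid_ecc,
                                                    u32 blk, u32 xcd)
    {
            if (blk == RAS_BLOCK_ID__GFX)
                    return &aid_ecc->xcd.xcd[xcd].ecc_err;    /* per-XCD counters */
            return &aid_ecc->ecc_err;                         /* flat per-AID counter */
    }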
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
new file mode 100644
index 000000000000..29df98948703
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_core_status.h"
+#include "ras_aca_v1_0.h"
+
+struct ras_aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
+static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = {
+ [ACA_ECC_HWIP__SMU] = {0x01, 0x01},
+ [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00},
+ [ACA_ECC_HWIP__UMC] = {0x96, 0x00},
+};
+
+static int aca_decode_bank_info(struct aca_block *aca_blk,
+ struct aca_bank_reg *bank, struct aca_ecc_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ info->hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ info->mcatype = ACA_REG_IPID_MCATYPE(ipid);
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4:4] = InstanceId[0:0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid);
+ instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
+
+ if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) &&
+ (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX))
+ info->xcd_id =
+ ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 0 : 1;
+
+ return 0;
+}
+
+static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type)
+{
+ struct ras_aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || (type == ACA_ECC_HWIP__UNKNOWN))
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ mcatype = ACA_REG_IPID_MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_match_bank_default(struct aca_block *aca_blk, void *data)
+{
+ return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip);
+}
+
+static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ u32 instlo;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ switch (instlo) {
+ case mmSMNAID_XCD0_MCA_SMU:
+ case mmSMNAID_XCD1_MCA_SMU:
+ case mmSMNXCD_XCD0_MCA_SMU:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ /* CODE_SDMA0 - CODE_SDMA4; refer to the SMU driver interface header file */
+ static int sdma_err_codes[] = { 33, 34, 35, 36 };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check SDMA error codes */
+ for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) {
+ if (errcode == sdma_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ /* Refer to the SMU driver interface header file */
+ const int mmhub_err_codes[] = {
+ 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
+ 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
+ 10, /* CODE_UTCL2_ROUTER */
+ 11, /* CODE_VML2 */
+ 12, /* CODE_VML2_WALKER */
+ 13, /* CODE_MMCANE */
+ };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check MMHUB error codes */
+ for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) {
+ if (errcode == mmhub_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ return (ras_core->poison_supported &&
+ ACA_REG_STATUS_VAL(mc_umc_status) &&
+ ACA_REG_STATUS_DEFERRED(mc_umc_status));
+}
+
+static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_PCC(mc_umc_status) ||
+ ACA_REG_STATUS_UC(mc_umc_status) ||
+ ACA_REG_STATUS_TCC(mc_umc_status)));
+}
+
+static bool aca_check_umc_ce(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_CECC(mc_umc_status) ||
+ (ACA_REG_STATUS_UECC(mc_umc_status) &&
+ ACA_REG_STATUS_UC(mc_umc_status) == 0) ||
+ /* Identify data parity error in replay mode */
+ ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 ||
+ ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) &&
+ !(aca_check_umc_ue(ras_core, mc_umc_status)))));
+}
+
+static int aca_parse_umc_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk, void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ uint32_t ext_error_code;
+ uint64_t status0;
+
+ status0 = bank->regs[ACA_REG_IDX__STATUS];
+ if (!ACA_REG_STATUS_VAL(status0))
+ return 0;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0);
+
+ if (aca_check_umc_de(ras_core, status0))
+ ecc->de_count = 1;
+ else if (aca_check_umc_ue(ras_core, status0))
+ ecc->ue_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ else if (aca_check_umc_ce(ras_core, status0))
+ ecc->ce_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+
+ return 0;
+}
+
+static bool aca_check_bank_is_de(struct ras_core_context *ras_core,
+ uint64_t status)
+{
+ return (ACA_REG_STATUS_POISON(status) ||
+ ACA_REG_STATUS_DEFERRED(status));
+}
+
+static int aca_parse_bank_default(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 misc0 = bank->regs[ACA_REG_IDX__MISC0];
+ u64 status = bank->regs[ACA_REG_IDX__STATUS];
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = status;
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ if (aca_check_bank_is_de(ras_core, status)) {
+ ecc->de_count = 1;
+ } else {
+ if (bank->ecc_type == RAS_ERR_TYPE__UE)
+ ecc->ue_count = 1;
+ else if (bank->ecc_type == RAS_ERR_TYPE__CE)
+ ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+static int aca_parse_xgmi_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 status, count;
+ int ext_error_code;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ status = bank->regs[ACA_REG_IDX__STATUS];
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status);
+
+ count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ if (bank->ecc_type == RAS_ERR_TYPE__UE) {
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+ ecc->ue_count = count;
+ } else if (bank->ecc_type == RAS_ERR_TYPE__CE) {
+ count = ext_error_code == 6 ? count : 0ULL;
+ ecc->ce_count = count;
+ }
+
+ return 0;
+}
+
+static const struct aca_block_info aca_v1_0_umc = {
+ .name = "umc",
+ .ras_block_id = RAS_BLOCK_ID__UMC,
+ .hwip = ACA_ECC_HWIP__UMC,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_umc_bank,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_gfx = {
+ .name = "gfx",
+ .ras_block_id = RAS_BLOCK_ID__GFX,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_gfx_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_sdma = {
+ .name = "sdma",
+ .ras_block_id = RAS_BLOCK_ID__SDMA,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_sdma_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_mmhub = {
+ .name = "mmhub",
+ .ras_block_id = RAS_BLOCK_ID__MMHUB,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_mmhub_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_xgmi = {
+ .name = "xgmi",
+ .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL,
+ .hwip = ACA_ECC_HWIP__PCS_XGMI,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_xgmi_bank,
+ },
+};
+
+static const struct aca_block_info *aca_block_info_v1_0[] = {
+ &aca_v1_0_umc,
+ &aca_v1_0_gfx,
+ &aca_v1_0_sdma,
+ &aca_v1_0_mmhub,
+ &aca_v1_0_xgmi,
+};
+
+const struct ras_aca_ip_func ras_aca_func_v1_0 = {
+ .block_num = ARRAY_SIZE(aca_block_info_v1_0),
+ .block_info = aca_block_info_v1_0,
+};
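Banks are classified against this table through the per-block bank_match ops. The real classifier, aca_get_bank_aca_block(), lives in ras_aca.c and is outside this excerpt; conceptually it walks the v1.0 table roughly as in this sketch (example_classify_bank is hypothetical and may differ from the actual implementation):

    static const struct aca_block_info *example_classify_bank(struct aca_bank_reg *bank)
    {
            uint32_t i;

            for (i = 0; i < ras_aca_func_v1_0.block_num; i++) {
                    const struct aca_block_info *info = ras_aca_func_v1_0.block_info[i];
                    /* bank_match only dereferences blk_info, so a stack
                     * aca_block wrapper is enough for classification.
                     */
                    struct aca_block blk = { .blk_info = info };

                    if (info->bank_ops.bank_match &&
                        info->bank_ops.bank_match(&blk, bank))
                            return info;
            }

            return NULL;
    }

Note that several blocks share the SMU hardware ID, so match order and the per-block instance/error-code checks (aca_match_gfx_bank, aca_match_sdma_bank, aca_match_mmhub_bank) do the real disambiguation.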
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
new file mode 100644
index 000000000000..40e5d94b037f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_V1_0_H__
+#define __RAS_ACA_V1_0_H__
+#include "ras.h"
+
+#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62)
+#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61)
+#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60)
+#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59)
+#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58)
+#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57)
+#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56)
+#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55)
+#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53)
+#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46)
+#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45)
+#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44)
+#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43)
+#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40)
+#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32)
+#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24)
+#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16)
+#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0)
+
+#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48)
+#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44)
+#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32)
+#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0)
+
+#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48)
+#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32)
+
+#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0)
+
+/* NOTE: The following codes refer to the SMU header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+
+extern const struct ras_aca_ip_func ras_aca_func_v1_0;
+#endif
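As a worked example of the field-extraction macros, decoding a synthetic IPID value for a UMC bank (hardware ID 0x96, MCA type 0x0, matching aca_hwid_mcatypes); example_decode_ipid is illustrative only:

    static void example_decode_ipid(void)
    {
            /* Layout: MCATYPE [63:48] | INSTANCEIDHI [47:44] |
             *         HARDWAREID [43:32] | INSTANCEIDLO [31:0]
             */
            u64 ipid = (0x0000ULL << 48) |  /* MCATYPE    = 0x00 (UMC) */
                       (0x096ULL << 32)  |  /* HARDWAREID = 0x96 (UMC) */
                       0x12345678ULL;       /* INSTANCEIDLO            */

            WARN_ON(ACA_REG_IPID_HARDWAREID(ipid) != 0x96);
            WARN_ON(ACA_REG_IPID_MCATYPE(ipid) != 0x00);
            WARN_ON(ACA_REG_IPID_INSTANCEIDLO(ipid) != 0x12345678);
    }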
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
new file mode 100644
index 000000000000..94e6d7420d94
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_cmd.h"
+
+#define RAS_CMD_MAJOR_VERSION 6
+#define RAS_CMD_MINOR_VERSION 0
+#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION))
+
+static int ras_cmd_add_device(struct ras_core_context *ras_core)
+{
+ INIT_LIST_HEAD(&ras_core->ras_cmd.head);
+ ras_core->ras_cmd.ras_core = ras_core;
+ ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC;
+ return 0;
+}
+
+static int ras_cmd_remove_device(struct ras_core_context *ras_core)
+{
+ memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd));
+ return 0;
+}
+
+static int ras_get_block_ecc_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_block_ecc_info_req *input_data =
+ (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_block_ecc_info_rsp *output_data =
+ (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw;
+ struct ras_ecc_count err_data;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ memset(&err_data, 0, sizeof(err_data));
+ ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->ce_count = err_data.total_ce_count;
+ output_data->ue_count = err_data.total_ue_count;
+ output_data->de_count = err_data.total_de_count;
+
+ cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record,
+ struct eeprom_umc_record *record)
+{
+ ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn;
+ ras_cmd_record->ts = record->ts;
+ ras_cmd_record->err_type = record->err_type;
+ ras_cmd_record->mem_channel = record->mem_channel;
+ ras_cmd_record->mcumc_id = record->mcumc_id;
+ ras_cmd_record->address = record->address;
+ ras_cmd_record->bank = record->bank;
+ ras_cmd_record->valid = 1;
+}
+
+static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core,
+ uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data)
+{
+ struct eeprom_umc_record record;
+ struct ras_cmd_bad_page_record *ras_cmd_record;
+ uint32_t i = 0, bp_cnt = 0, group_cnt = 0;
+
+ output_data->bp_in_group = 0;
+ output_data->group_index = 0;
+
+ bp_cnt = ras_umc_get_badpage_count(ras_core);
+ if (bp_cnt) {
+ output_data->group_index = group_index;
+ group_cnt = DIV_ROUND_UP(bp_cnt, RAS_CMD_MAX_BAD_PAGES_PER_GROUP);
+
+ if (group_index >= group_cnt)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (;
+ i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ i++) {
+ if (ras_umc_get_badpage_record(ras_core, i, &record))
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+
+ memset(ras_cmd_record, 0, sizeof(*ras_cmd_record));
+ ras_cmd_update_bad_page_info(ras_cmd_record, &record);
+ output_data->bp_in_group++;
+ }
+ }
+ output_data->bp_total_cnt = bp_cnt;
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_bad_pages_info_req *input_data =
+ (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_bad_pages_info_rsp *output_data =
+ (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_clear_bad_page_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_eeprom_reset_table(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clean_badpage_data(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_aca_clear_all_blocks_ecc_count(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clear_logged_ecc(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_snapshot_rsp *output_data =
+ (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ output_data->total_cper_num = overview.logged_batch_count;
+ output_data->start_cper_id = overview.first_batch_id;
+ output_data->latest_cper_id = overview.last_batch_id;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_record_req *req =
+ (struct ras_cmd_cper_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_cper_record_rsp *rsp =
+ (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_batch_overview overview;
+ uint32_t offset = 0, real_data_len = 0;
+ uint64_t batch_id;
+ uint8_t *buffer;
+ int ret = 0, i, count;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (!req->buf_size || !req->buf_ptr || !req->cper_num)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ buffer = kzalloc(req->buf_size, GFP_KERNEL);
+ if (!buffer)
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ for (i = 0; i < req->cper_num; i++) {
+ batch_id = req->cper_start_id + i;
+ if (batch_id >= overview.last_batch_id)
+ break;
+
+ count = ras_log_ring_get_batch_records(ras_core, batch_id, trace,
+ ARRAY_SIZE(trace));
+ if (count > 0) {
+ ret = ras_cper_generate_cper(ras_core, trace, count,
+ &buffer[offset], req->buf_size - offset, &real_data_len);
+ if (ret)
+ break;
+
+ offset += real_data_len;
+ }
+ }
+
+ if ((ret && (ret != -ENOMEM)) ||
+ copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) {
+ kfree(buffer);
+ return RAS_CMD__ERROR_GENERIC;
+ }
+
+ rsp->real_data_size = offset;
+ rsp->real_cper_num = i;
+ rsp->remain_num = (ret == -ENOMEM) ? (req->cper_num - i) : 0;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp);
+
+ kfree(buffer);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_snapshot_rsp *rsp =
+ (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ rsp->total_batch_num = overview.logged_batch_count;
+ rsp->start_batch_id = overview.first_batch_id;
+ rsp->latest_batch_id = overview.last_batch_id;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_record_req *input_data =
+ (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_batch_trace_record_rsp *output_data =
+ (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+ struct ras_log_info *trace_array[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_info *record;
+ int i, j, count = 0, offset = 0;
+ uint64_t id;
+ bool completed = false;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ if ((input_data->start_batch_id < overview.first_batch_id) ||
+ (input_data->start_batch_id >= overview.last_batch_id))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ for (i = 0; i < input_data->batch_num; i++) {
+ id = input_data->start_batch_id + i;
+ if (id >= overview.last_batch_id) {
+ completed = true;
+ break;
+ }
+
+ count = ras_log_ring_get_batch_records(ras_core,
+ id, trace_array, ARRAY_SIZE(trace_array));
+ if (count > 0) {
+ if ((offset + count) > RAS_CMD_MAX_TRACE_NUM)
+ break;
+ for (j = 0; j < count; j++) {
+ record = &output_data->records[offset + j];
+ record->seqno = trace_array[j]->seqno;
+ record->timestamp = trace_array[j]->timestamp;
+ record->event = trace_array[j]->event;
+ memcpy(&record->aca_reg,
+ &trace_array[j]->aca_reg, sizeof(trace_array[j]->aca_reg));
+ }
+ } else {
+ count = 0;
+ }
+
+ output_data->batchs[i].batch_id = id;
+ output_data->batchs[i].offset = offset;
+ output_data->batchs[i].trace_num = count;
+ offset += count;
+ }
+
+ output_data->start_batch_id = input_data->start_batch_id;
+ output_data->real_batch_num = i;
+ output_data->remain_num = completed ? 0 : (input_data->batch_num - i);
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block)
+{
+ switch (block) {
+ case RAS_BLOCK_ID__UMC:
+ return RAS_TA_BLOCK__UMC;
+ case RAS_BLOCK_ID__SDMA:
+ return RAS_TA_BLOCK__SDMA;
+ case RAS_BLOCK_ID__GFX:
+ return RAS_TA_BLOCK__GFX;
+ case RAS_BLOCK_ID__MMHUB:
+ return RAS_TA_BLOCK__MMHUB;
+ case RAS_BLOCK_ID__ATHUB:
+ return RAS_TA_BLOCK__ATHUB;
+ case RAS_BLOCK_ID__PCIE_BIF:
+ return RAS_TA_BLOCK__PCIE_BIF;
+ case RAS_BLOCK_ID__HDP:
+ return RAS_TA_BLOCK__HDP;
+ case RAS_BLOCK_ID__XGMI_WAFL:
+ return RAS_TA_BLOCK__XGMI_WAFL;
+ case RAS_BLOCK_ID__DF:
+ return RAS_TA_BLOCK__DF;
+ case RAS_BLOCK_ID__SMN:
+ return RAS_TA_BLOCK__SMN;
+ case RAS_BLOCK_ID__SEM:
+ return RAS_TA_BLOCK__SEM;
+ case RAS_BLOCK_ID__MP0:
+ return RAS_TA_BLOCK__MP0;
+ case RAS_BLOCK_ID__MP1:
+ return RAS_TA_BLOCK__MP1;
+ case RAS_BLOCK_ID__FUSE:
+ return RAS_TA_BLOCK__FUSE;
+ case RAS_BLOCK_ID__MCA:
+ return RAS_TA_BLOCK__MCA;
+ case RAS_BLOCK_ID__VCN:
+ return RAS_TA_BLOCK__VCN;
+ case RAS_BLOCK_ID__JPEG:
+ return RAS_TA_BLOCK__JPEG;
+ default:
+ return RAS_TA_BLOCK__UMC;
+ }
+}
+
+static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error)
+{
+ switch (error) {
+ case RAS_ECC_ERR__NONE:
+ return RAS_TA_ERROR__NONE;
+ case RAS_ECC_ERR__PARITY:
+ return RAS_TA_ERROR__PARITY;
+ case RAS_ECC_ERR__SINGLE_CORRECTABLE:
+ return RAS_TA_ERROR__SINGLE_CORRECTABLE;
+ case RAS_ECC_ERR__MULTI_UNCORRECTABLE:
+ return RAS_TA_ERROR__MULTI_UNCORRECTABLE;
+ case RAS_ECC_ERR__POISON:
+ return RAS_TA_ERROR__POISON;
+ default:
+ return RAS_TA_ERROR__NONE;
+ }
+}
+
+static int ras_cmd_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ struct ras_cmd_inject_error_rsp *output_data =
+ (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw;
+ int ret = 0;
+ struct ras_ta_trigger_error_input block_info = {
+ .block_id = __get_ras_ta_block(req->block_id),
+ .sub_block_index = req->subblock_id,
+ .inject_error_type = __get_ras_ta_err_type(req->error_type),
+ .address = req->address,
+ .value = req->method,
+ };
+
+ ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask);
+ if (!ret) {
+ output_data->version = 0;
+ output_data->address = block_info.address;
+ cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static struct ras_cmd_func_map ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error},
+ {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info},
+ {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages},
+ {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info},
+ {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts},
+ {RAS_CMD__GET_CPER_SNAPSHOT, ras_cmd_get_cper_snapshot},
+ {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records},
+ {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot},
+ {RAS_CMD__GET_BATCH_TRACE_RECORD, ras_cmd_get_batch_trace_records},
+};
+
+int rascore_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) {
+ if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (!ras_cmd)
+ return RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return ras_cmd->func(ras_core, cmd, data);
+}
+
+int ras_cmd_init(struct ras_core_context *ras_core)
+{
+ return ras_cmd_add_device(ras_core);
+}
+
+int ras_cmd_fini(struct ras_core_context *ras_core)
+{
+ ras_cmd_remove_device(ras_core);
+ return 0;
+}
+
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp)
+{
+ rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION;
+ rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION;
+
+ return 0;
+}
+
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr)
+{
+ struct umc_bank_addr umc_bank = {0};
+ int ret;
+
+ ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ bank_addr->stack_id = umc_bank.stack_id;
+ bank_addr->bank_group = umc_bank.bank_group;
+ bank_addr->bank = umc_bank.bank;
+ bank_addr->row = umc_bank.row;
+ bank_addr->column = umc_bank.column;
+ bank_addr->channel = umc_bank.channel;
+ bank_addr->subchannel = umc_bank.subchannel;
+
+ return 0;
+}
+
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa)
+{
+ struct umc_bank_addr umc_bank = {0};
+
+ umc_bank.stack_id = bank_addr.stack_id;
+ umc_bank.bank_group = bank_addr.bank_group;
+ umc_bank.bank = bank_addr.bank;
+ umc_bank.row = bank_addr.row;
+ umc_bank.column = bank_addr.column;
+ umc_bank.channel = bank_addr.channel;
+ umc_bank.subchannel = bank_addr.subchannel;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true);
+}
+
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_cmd.dev_handle;
+}
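A hedged sketch of how a transport layer might drive rascore_handle_cmd() for RAS_CMD__GET_BLOCK_ECC_STATUS; allocation and field usage follow the ras_cmd_ctx layout in ras_cmd.h below, dev-handle validation (if any) is assumed to happen in the caller, and example_query_umc_ecc is a hypothetical name:

    static int example_query_umc_ecc(struct ras_core_context *ras_core)
    {
            struct ras_cmd_block_ecc_info_req *req;
            struct ras_cmd_block_ecc_info_rsp *rsp;
            struct ras_cmd_ctx *cmd;
            int ret;

            /* output_buff_raw is a flexible array member; reserve room
             * for the largest response this command can produce.
             */
            cmd = kzalloc(sizeof(*cmd) + sizeof(*rsp), GFP_KERNEL);
            if (!cmd)
                    return -ENOMEM;

            cmd->cmd_id = RAS_CMD__GET_BLOCK_ECC_STATUS;
            cmd->input_size = sizeof(*req);
            cmd->output_buf_size = sizeof(*rsp);

            req = (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw;
            req->dev.dev_handle = ras_cmd_get_dev_handle(ras_core);
            req->block_id = RAS_BLOCK_ID__UMC;

            ret = rascore_handle_cmd(ras_core, cmd, NULL);
            if (ret == RAS_CMD__SUCCESS) {
                    rsp = (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw;
                    RAS_DEV_ERR(ras_core->dev, "UMC ce/ue/de: %u/%u/%u\n",
                                rsp->ce_count, rsp->ue_count, rsp->de_count);
            }

            kfree(cmd);
            return ret;
    }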
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
new file mode 100644
index 000000000000..48a0715eb821
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_CMD_H__
+#define __RAS_CMD_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL
+
+#define RAS_CMD_MAX_IN_SIZE 256
+#define RAS_CMD_MAX_GPU_NUM 32
+#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32
+
+/* Position of the instance value in sub_block_index of
+ * ta_ras_trigger_error_input; the sub-block index uses the lower 12 bits.
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+enum ras_cmd_interface_type {
+ RAS_CMD_INTERFACE_TYPE_NONE,
+ RAS_CMD_INTERFACE_TYPE_AMDGPU,
+ RAS_CMD_INTERFACE_TYPE_VF,
+ RAS_CMD_INTERFACE_TYPE_PF,
+};
+
+enum ras_cmd_id_range {
+ RAS_CMD_ID_COMMON_START = 0,
+ RAS_CMD_ID_COMMON_END = 0x10000,
+ RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END,
+ RAS_CMD_ID_AMDGPU_END = 0x20000,
+ RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END,
+ RAS_CMD_ID_MXGPU_END = 0x30000,
+ RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END,
+ RAS_CMD_ID_MXGPU_VF_END = 0x40000,
+};
+
+enum ras_cmd_id {
+ RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START,
+ RAS_CMD__QUERY_INTERFACE_INFO,
+ RAS_CMD__GET_DEVICES_INFO,
+ RAS_CMD__GET_BLOCK_ECC_STATUS,
+ RAS_CMD__INJECT_ERROR,
+ RAS_CMD__GET_BAD_PAGES,
+ RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ RAS_CMD__RESET_ALL_ERROR_COUNTS,
+ RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES,
+ RAS_CMD__TRANSLATE_FB_ADDRESS,
+ RAS_CMD__GET_LINK_TOPOLOGY,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ RAS_CMD__GET_CPER_RECORD,
+ RAS_CMD__GET_BATCH_TRACE_SNAPSHOT,
+ RAS_CMD__GET_BATCH_TRACE_RECORD,
+ RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END,
+};
+
+enum ras_cmd_response {
+ RAS_CMD__SUCCESS = 0,
+ RAS_CMD__SUCCESS_EXEED_BUFFER,
+ RAS_CMD__ERROR_UKNOWN_CMD,
+ RAS_CMD__ERROR_INVALID_CMD,
+ RAS_CMD__ERROR_VERSION,
+ RAS_CMD__ERROR_INVALID_INPUT_SIZE,
+ RAS_CMD__ERROR_INVALID_INPUT_DATA,
+ RAS_CMD__ERROR_DRV_INIT_FAIL,
+ RAS_CMD__ERROR_ACCESS_DENIED,
+ RAS_CMD__ERROR_GENERIC,
+ RAS_CMD__ERROR_TIMEOUT,
+};
+
+enum ras_error_type {
+ RAS_TYPE_ERROR__NONE = 0,
+ RAS_TYPE_ERROR__PARITY = 1,
+ RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TYPE_ERROR__POISON = 8,
+};
+
+struct ras_core_context;
+struct ras_cmd_ctx;
+
+struct ras_cmd_mgr {
+ struct list_head head;
+ struct ras_core_context *ras_core;
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_func_map {
+ uint32_t cmd_id;
+ int (*func)(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+};
+
+struct ras_device_bdf {
+ union {
+ struct {
+ uint32_t function : 3;
+ uint32_t device : 5;
+ uint32_t bus : 8;
+ uint32_t domain : 16;
+ };
+ uint32_t u32_all;
+ };
+};
+
+struct ras_cmd_param {
+ uint32_t idx_vf;
+ void *data;
+};
+
+#pragma pack(push, 8)
+struct ras_cmd_ctx {
+ uint32_t magic;
+ union {
+ struct {
+ uint16_t ras_cmd_minor_ver : 10;
+ uint16_t ras_cmd_major_ver : 6;
+ };
+ uint16_t ras_cmd_ver;
+ };
+ union {
+ struct {
+ uint16_t plat_major_ver : 10;
+ uint16_t plat_minor_ver : 6;
+ };
+ uint16_t plat_ver;
+ };
+ uint32_t cmd_id;
+ uint32_t cmd_res;
+ uint32_t input_size;
+ uint32_t output_size;
+ uint32_t output_buf_size;
+ uint32_t reserved[5];
+ uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE];
+ uint8_t output_buff_raw[];
+};
+
+struct ras_cmd_dev_handle {
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_block_ecc_info_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_block_ecc_info_rsp {
+ uint32_t version;
+ uint32_t ce_count;
+ uint32_t ue_count;
+ uint32_t de_count;
+ uint32_t reserved[6];
+};
+
+struct ras_cmd_inject_error_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint64_t address;
+ uint32_t error_type;
+ uint32_t instance_mask;
+ union {
+ struct {
+ /* vf index */
+ uint64_t vf_idx : 6;
+ /* method of error injection, e.g. persistent, coherent */
+ uint64_t method : 10;
+ uint64_t rsv : 48;
+ };
+ uint64_t value;
+ };
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_inject_error_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ uint64_t address;
+};
+
+struct ras_cmd_dev_info {
+ uint64_t dev_handle;
+ uint32_t location_id;
+ uint32_t ecc_enabled;
+ uint32_t ecc_supported;
+ uint32_t vf_num;
+ uint32_t asic_type;
+ uint32_t oam_id;
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_devices_info_rsp {
+ uint32_t version;
+ uint32_t dev_num;
+ uint32_t reserved[6];
+ struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM];
+};
+
+struct ras_cmd_bad_page_record {
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+ uint64_t retired_page;
+ uint64_t ts;
+
+ uint32_t err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ unsigned char valid;
+ unsigned char reserved[8];
+};
+
+struct ras_cmd_bad_pages_info_req {
+ struct ras_cmd_dev_handle device;
+ uint32_t group_index;
+ uint32_t reserved[5];
+};
+
+struct ras_cmd_bad_pages_info_rsp {
+ uint32_t version;
+ uint32_t group_index;
+ uint32_t bp_in_group;
+ uint32_t bp_total_cnt;
+ uint32_t reserved[4];
+ struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+};
+
+struct ras_query_interface_info_req {
+ uint32_t reserved[8];
+};
+
+struct ras_query_interface_info_rsp {
+ uint32_t version;
+ uint32_t ras_cmd_major_ver;
+ uint32_t ras_cmd_minor_ver;
+ uint32_t plat_major_ver;
+ uint32_t plat_minor_ver;
+ uint8_t interface_type;
+ uint8_t rsv[3];
+ uint32_t reserved[8];
+};
+
+#define RAS_MAX_NUM_SAFE_RANGES 64
+struct ras_cmd_ras_safe_fb_address_ranges_rsp {
+ uint32_t version;
+ uint32_t num_ranges;
+ uint32_t reserved[4];
+ struct {
+ uint64_t start;
+ uint64_t size;
+ uint32_t idx;
+ uint32_t reserved[3];
+ } range[RAS_MAX_NUM_SAFE_RANGES];
+};
+
+enum ras_fb_addr_type {
+ RAS_FB_ADDR_SOC_PHY, /* SPA */
+ RAS_FB_ADDR_BANK,
+ RAS_FB_ADDR_VF_PHY, /* GPA */
+ RAS_FB_ADDR_UNKNOWN
+};
+
+struct ras_fb_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+ uint32_t reserved[3];
+};
+
+struct ras_fb_vf_phy_addr {
+ uint32_t vf_idx;
+ uint32_t reserved;
+ uint64_t addr;
+};
+
+union ras_translate_fb_address {
+ struct ras_fb_bank_addr bank_addr;
+ uint64_t soc_phy_addr;
+ struct ras_fb_vf_phy_addr vf_phy_addr;
+};
+
+struct ras_cmd_translate_fb_address_req {
+ struct ras_cmd_dev_handle dev;
+ enum ras_fb_addr_type src_addr_type;
+ enum ras_fb_addr_type dest_addr_type;
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_cmd_translate_fb_address_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_dev_link_topology_req {
+ struct ras_cmd_dev_handle src;
+ struct ras_cmd_dev_handle dst;
+};
+
+struct ras_dev_link_topology_rsp {
+ uint32_t version;
+ uint32_t link_status; /* HW status of the link */
+ uint32_t link_type; /* type of the link */
+ uint32_t num_hops; /* number of hops */
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_cper_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_cper_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ uint64_t latest_cper_id;
+};
+
+struct ras_cmd_cper_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t cper_start_id;
+ uint32_t cper_num;
+ uint32_t buf_size;
+ uint64_t buf_ptr;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_cper_record_rsp {
+ uint32_t version;
+ uint32_t real_data_size;
+ uint32_t real_cper_num;
+ uint32_t remain_num;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_batch_trace_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_batch_trace_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_batch_num;
+ uint64_t start_batch_id;
+ uint64_t latest_batch_id;
+};
+
+struct ras_cmd_batch_trace_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t start_batch_id;
+ uint32_t batch_num;
+ uint32_t reserved[5];
+};
+
+struct batch_ras_trace_info {
+ uint64_t batch_id;
+ uint16_t offset;
+ uint8_t trace_num;
+ uint8_t rsv;
+ uint32_t reserved;
+};
+
+#define RAS_CMD_MAX_BATCH_NUM 300
+#define RAS_CMD_MAX_TRACE_NUM 300
+struct ras_cmd_batch_trace_record_rsp {
+ uint32_t version;
+ uint16_t real_batch_num;
+ uint16_t remain_num;
+ uint64_t start_batch_id;
+ uint32_t reserved[2];
+ struct batch_ras_trace_info batchs[RAS_CMD_MAX_BATCH_NUM];
+ struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM];
+};
+
+#pragma pack(pop)
+
+int ras_cmd_init(struct ras_core_context *ras_core);
+int ras_cmd_fini(struct ras_core_context *ras_core);
+int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data);
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core);
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp);
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr);
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa);
+#endif
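The two address-translation helpers are inverses over ras_umc_translate_soc_pa_and_bank(); a round-trip sketch (assuming the UMC backend for the ASIC supports both directions; example_addr_round_trip is illustrative):

    static void example_addr_round_trip(struct ras_core_context *ras_core,
                                        uint64_t soc_pa)
    {
            struct ras_fb_bank_addr bank = {0};
            uint64_t pa_again = 0;

            if (ras_cmd_translate_soc_pa_to_bank(ras_core, soc_pa, &bank))
                    return;

            /* bank.row/column/channel/... now describe the same DRAM cell. */
            if (!ras_cmd_translate_bank_to_soc_pa(ras_core, bank, &pa_again))
                    WARN_ON(pa_again != soc_pa);
    }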
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
new file mode 100644
index 000000000000..01122b55c98a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+
+#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t))
+
+#define IS_LEAP_YEAR(x) ((((x) % 4 == 0) && ((x) % 100 != 0)) || ((x) % 400 == 0))
+
+static const char * const ras_block_name[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+ "mca",
+ "vcn",
+ "jpeg",
+ "ih",
+ "mpio",
+};
+
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id)
+{
+ if (block_id >= ARRAY_SIZE(ras_block_name))
+ return "";
+
+ return ras_block_name[block_id];
+}
+
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm)
+{
+ int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+ uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0;
+ uint32_t year = 0;
+ int seconds_per_day = 24 * 60 * 60;
+ int seconds_per_hour = 60 * 60;
+ int seconds_per_minute = 60;
+ int days, remaining_seconds;
+ uint64_t rem64;
+
+ days = div64_u64_rem(timestamp, seconds_per_day, &rem64);
+ remaining_seconds = (int)rem64;
+
+ /* utc_timestamp follows the Unix epoch */
+ year = 1970;
+ while (days >= 365) {
+ if (IS_LEAP_YEAR(year)) {
+ if (days < 366)
+ break;
+ days -= 366;
+ } else {
+ days -= 365;
+ }
+ year++;
+ }
+
+ days_in_month[1] += IS_LEAP_YEAR(year);
+
+ month = 0;
+ while (days >= days_in_month[month]) {
+ days -= days_in_month[month];
+ month++;
+ }
+ month++;
+ day = days + 1;
+
+ if (remaining_seconds) {
+ hour = remaining_seconds / seconds_per_hour;
+ minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute;
+ second = remaining_seconds % seconds_per_minute;
+ }
+
+ tm->tm_year = year;
+ tm->tm_mon = month;
+ tm->tm_mday = day;
+ tm->tm_hour = hour;
+ tm->tm_min = minute;
+ tm->tm_sec = second;
+
+ return 0;
+}
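+
+/*
+ * Worked example (editorial illustration): timestamp 1700000000 splits
+ * into 19675 whole days plus 80000 remaining seconds. The year loop
+ * consumes the days down to day 317 of 2023, i.e. November 14th, and
+ * 80000 s is 22:13:20, giving 2023-11-14 22:13:20 UTC.
+ */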
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return !!(status & RAS_GPU_STATUS__IN_RESET);
+}
+
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return !!(status & RAS_GPU_STATUS__IS_VF);
+}
+
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return false;
+
+ return ras_core->is_rma;
+}
+
+static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t seqno)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo)
+ ret = kfifo_in_spinlocked(seqno_fifo,
+ &seqno, sizeof(seqno), &ras_core->seqno_lock);
+
+ return ret ? 0 : -EINVAL;
+}
+
+static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo) {
+ if (pop)
+ ret = kfifo_out_spinlocked(seqno_fifo,
+ seqno, sizeof(*seqno), &ras_core->seqno_lock);
+ else
+ ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno));
+ }
+
+ return ret ? 0 : -EINVAL;
+}
+
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type type)
+{
+ uint64_t seqno = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->gen_seqno)
+ ras_core->sys_fn->gen_seqno(ras_core, type, &seqno);
+
+ return seqno;
+}
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno)
+{
+ int ret = 0;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return -EINVAL;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CREATION, seqno);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, seqno);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop)
+{
+ uint64_t seq_no;
+ int ret = -ENODATA;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return 0;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CREATION, &seq_no, pop);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop);
+
+ if (ret)
+ seq_no = ras_core_gen_seqno(ras_core, seqno_type);
+
+ return seq_no;
+}
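+
+/*
+ * Usage sketch (editorial illustration): a poison-creation interrupt
+ * path would stash its sequence number with
+ * ras_core_put_seqno(ras_core, RAS_SEQNO_TYPE_DE, seqno), and the
+ * deferred handler later pairs it back up via
+ * ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true), which falls
+ * back to generating a fresh seqno when the FIFO has run dry.
+ */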
+
+static int ras_core_eeprom_recovery(struct ras_core_context *ras_core)
+{
+ int count;
+ int ret;
+
+ count = ras_eeprom_get_record_count(ras_core);
+ if (!count)
+ return 0;
+
+ /* Avoid reloading bad pages after gpu reset */
+ if (ras_umc_get_saved_eeprom_count(ras_core) >= count)
+ return 0;
+
+ ret = ras_umc_load_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret);
+ return ret;
+ }
+
+ ras_eeprom_sync_info(ras_core);
+
+ return ret;
+}
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config)
+{
+ struct ras_core_context *ras_core;
+ struct ras_core_config *config;
+
+ ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL);
+ if (!ras_core)
+ return NULL;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ kfree(ras_core);
+ return NULL;
+ }
+
+ memcpy(config, init_config, sizeof(*config));
+ ras_core->config = config;
+
+ return ras_core;
+}
+
+void ras_core_destroy(struct ras_core_context *ras_core)
+{
+ if (ras_core)
+ kfree(ras_core->config);
+
+ kfree(ras_core);
+}
+
+int ras_core_sw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ if (!ras_core->config) {
+ RAS_DEV_ERR(ras_core->dev, "No ras core config!\n");
+ return -EINVAL;
+ }
+
+ ras_core->sys_fn = ras_core->config->sys_fn;
+ if (!ras_core->sys_fn)
+ return -EINVAL;
+
+ ret = kfifo_alloc(&ras_core->de_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ret = kfifo_alloc(&ras_core->consumption_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ goto err_free_de_fifo;
+
+ spin_lock_init(&ras_core->seqno_lock);
+
+ ret = ras_aca_sw_init(ras_core);
+ if (ret)
+ goto err_free_consumption_fifo;
+
+ ret = ras_umc_sw_init(ras_core);
+ if (ret)
+ goto err_aca_fini;
+
+ ret = ras_cmd_init(ras_core);
+ if (ret)
+ goto err_umc_fini;
+
+ ret = ras_log_ring_sw_init(ras_core);
+ if (ret)
+ goto err_cmd_fini;
+
+ ret = ras_psp_sw_init(ras_core);
+ if (ret)
+ goto err_log_ring_fini;
+
+ return 0;
+
+err_log_ring_fini:
+ ras_log_ring_sw_fini(ras_core);
+err_cmd_fini:
+ ras_cmd_fini(ras_core);
+err_umc_fini:
+ ras_umc_sw_fini(ras_core);
+err_aca_fini:
+ ras_aca_sw_fini(ras_core);
+err_free_consumption_fifo:
+ kfifo_free(&ras_core->consumption_seqno_fifo);
+err_free_de_fifo:
+ kfifo_free(&ras_core->de_seqno_fifo);
+ return ret;
+}
+
+int ras_core_sw_fini(struct ras_core_context *ras_core)
+{
+ kfifo_free(&ras_core->de_seqno_fifo);
+ kfifo_free(&ras_core->consumption_seqno_fifo);
+
+ ras_psp_sw_fini(ras_core);
+ ras_log_ring_sw_fini(ras_core);
+ ras_cmd_fini(ras_core);
+ ras_umc_sw_fini(ras_core);
+ ras_aca_sw_fini(ras_core);
+
+ return 0;
+}
+
+int ras_core_hw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ras_core->ras_eeprom_supported =
+ ras_core->config->ras_eeprom_supported;
+
+ ras_core->poison_supported = ras_core->config->poison_supported;
+
+ ret = ras_psp_hw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_aca_hw_init(ras_core);
+ if (ret)
+ goto init_err1;
+
+ ret = ras_mp1_hw_init(ras_core);
+ if (ret)
+ goto init_err2;
+
+ ret = ras_nbio_hw_init(ras_core);
+ if (ret)
+ goto init_err3;
+
+ ret = ras_umc_hw_init(ras_core);
+ if (ret)
+ goto init_err4;
+
+ ret = ras_gfx_hw_init(ras_core);
+ if (ret)
+ goto init_err5;
+
+ ret = ras_eeprom_hw_init(ras_core);
+ if (ret)
+ goto init_err6;
+
+ ret = ras_core_eeprom_recovery(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to recover ras core, ret:%d\n", ret);
+ goto init_err7;
+ }
+
+ ret = ras_eeprom_check_storage_status(ras_core);
+ if (ret)
+ goto init_err7;
+
+ ret = ras_process_init(ras_core);
+ if (ret)
+ goto init_err7;
+
+ ras_core->is_initialized = true;
+
+ return 0;
+
+init_err7:
+ ras_eeprom_hw_fini(ras_core);
+init_err6:
+ ras_gfx_hw_fini(ras_core);
+init_err5:
+ ras_umc_hw_fini(ras_core);
+init_err4:
+ ras_nbio_hw_fini(ras_core);
+init_err3:
+ ras_mp1_hw_fini(ras_core);
+init_err2:
+ ras_aca_hw_fini(ras_core);
+init_err1:
+ ras_psp_hw_fini(ras_core);
+ return ret;
+}
+
+int ras_core_hw_fini(struct ras_core_context *ras_core)
+{
+ ras_core->is_initialized = false;
+
+ ras_process_fini(ras_core);
+ ras_eeprom_hw_fini(ras_core);
+ ras_gfx_hw_fini(ras_core);
+ ras_nbio_hw_fini(ras_core);
+ ras_umc_hw_fini(ras_core);
+ ras_mp1_hw_fini(ras_core);
+ ras_aca_hw_fini(ras_core);
+ ras_psp_hw_fini(ras_core);
+
+ return 0;
+}
+
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data)
+{
+ return ras_nbio_handle_irq_error(ras_core, data);
+}
+
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core)
+{
+ int ret = 0;
+
+ ras_aca_mark_fatal_flag(ras_core);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL);
+
+ return ret;
+}
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core)
+{
+ if (ras_core->ras_nbio.ip_func &&
+ ras_core->ras_nbio.ip_func->get_memory_partition_mode)
+ return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n");
+ return 0;
+}
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL);
+ if (!ret)
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL);
+
+ return ret;
+}
+
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count)
+{
+ int ret;
+
+ if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core)
+ return -EINVAL;
+
+ ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count);
+ if (!ret)
+ ras_aca_clear_block_new_ecc_count(ras_core, block);
+
+ return ret;
+}
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable)
+{
+ ras_core->ras_core_enabled = enable;
+
+ return 0;
+}
+
+bool ras_core_is_enabled(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_core_enabled;
+}
+
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_utc_second_timestamp)
+ return ras_core->sys_fn->get_utc_second_timestamp(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n");
+ return 0;
+}
+
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ if (!ras_core || !soc_pa || !bank_addr)
+ return -EINVAL;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa);
+}
+
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->detect_ras_interrupt)
+ return ras_core->sys_fn->detect_ras_interrupt(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n");
+ return false;
+}
+
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
+ return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config get gpu memory API!\n");
+ return -EACCES;
+}
+
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
+ return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config put gpu memory API!!\n");
+ return -EACCES;
+}
+
+bool ras_core_is_ready(struct ras_core_context *ras_core)
+{
+ return ras_core ? ras_core->is_initialized : false;
+}
+
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ return ras_eeprom_check_safety_watermark(ras_core);
+}
+
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true);
+
+ return 1;
+}
+
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, true, false);
+}
+
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, false, false);
+}
+
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->ras_notifier)
+ return ras_core->sys_fn->ras_notifier(ras_core, event_id, data);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
+
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_device_system_info)
+ return ras_core->sys_fn->get_device_system_info(ras_core, dev_info);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
new file mode 100644
index 000000000000..0fc7522b7ab6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE;
+static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC;
+static const struct ras_cper_guid BOOT = BOOT__TYPE;
+
+static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP;
+static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR;
+
+static void cper_get_timestamp(struct ras_core_context *ras_core,
+ struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp)
+{
+ struct ras_time tm = {0};
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = tm.tm_mon;
+ timestamp->year = tm.tm_year % 100;
+ timestamp->century = tm.tm_year / 100;
+}
+
+static void fill_section_hdr(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, enum ras_cper_type type,
+ enum ras_cper_severity sev, struct ras_log_info *trace)
+{
+ struct device_system_info dev_info = {0};
+ char record_id[32];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR__REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev);
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp);
+
+ snprintf(record_id, sizeof(record_id), "%d:%llX", dev_info.socket_id,
+ RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ dev_info.vendor_id, dev_info.device_id);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU);
+
+ switch (type) {
+ case RAS_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ case RAS_CPER_TYPE_RMA:
+ hdr->notify_type = MCE;
+ break;
+ case RAS_CPER_TYPE_RUNTIME:
+ if (sev == RAS_CPER_SEV_NON_FATAL_CE)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n");
+ break;
+ }
+}
+
+static int fill_section_descriptor(struct ras_core_context *ras_core,
+ struct cper_section_descriptor *descriptor,
+ enum ras_cper_severity sev,
+ struct ras_cper_guid sec_type,
+ uint32_t section_offset,
+ uint32_t section_length)
+{
+ struct device_system_info dev_info = {0};
+
+ descriptor->revision_minor = CPER_SEC__MINOR_REV_1;
+ descriptor->revision_major = CPER_SEC__MAJOR_REV_22;
+ descriptor->sec_offset = section_offset;
+ descriptor->sec_length = section_length;
+ descriptor->valid_bits.fru_text = 1;
+ descriptor->flag_bits.primary = 1;
+ descriptor->severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev);
+ descriptor->sec_type = sec_type;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id);
+
+ if (sev == RAS_CPER_SEV_RMA)
+ descriptor->flag_bits.exceed_err_threshold = 1;
+
+ if (sev == RAS_CPER_SEV_NON_FATAL_UE)
+ descriptor->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+static int fill_section_fatal(struct ras_core_context *ras_core,
+ struct cper_section_fatal *fatal, struct ras_log_info *trace)
+{
+ fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ fatal->data.reg_arr_size = sizeof(fatal->data.reg);
+
+ fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS];
+ fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR];
+ fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID];
+ fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND];
+
+ return 0;
+}
+
+static int fill_section_runtime(struct ras_core_context *ras_core,
+ struct cper_section_runtime *runtime, struct ras_log_info *trace,
+ enum ras_cper_severity sev)
+{
+ runtime->hdr.valid_bits.err_info_cnt = 1;
+ runtime->hdr.valid_bits.err_context_cnt = 1;
+
+ runtime->descriptor.error_type = RUNTIME;
+ runtime->descriptor.ms_chk_bits.err_type_valid = 1;
+ if (sev == RAS_CPER_SEV_RMA) {
+ runtime->descriptor.valid_bits.ms_chk = 1;
+ runtime->descriptor.ms_chk_bits.err_type = 1;
+ runtime->descriptor.ms_chk_bits.pcc = 1;
+ }
+
+ runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump);
+
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND];
+
+ return 0;
+}
+
+static int cper_generate_runtime_record(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num,
+ enum ras_cper_severity sev)
+{
+ struct cper_section_descriptor *descriptor;
+ struct cper_section_runtime *runtime;
+ int i;
+
+ fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]);
+ hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num);
+ hdr->sec_cnt = arr_num;
+ for (i = 0; i < arr_num; i++) {
+ descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr +
+ RAS_SEC_DESC_OFFSET(i));
+ runtime = (struct cper_section_runtime *)((uint8_t *)hdr +
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i));
+
+ fill_section_descriptor(ras_core, descriptor, sev, RUNTIME,
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i),
+ sizeof(struct cper_section_runtime));
+ fill_section_runtime(ras_core, runtime, trace_arr[i], sev);
+ }
+
+ return 0;
+}
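+
+/*
+ * Resulting layout (editorial illustration), for arr_num == 2:
+ * | hdr | desc0 | desc1 | runtime0 | runtime1 |
+ * where each descriptor's sec_offset points at its runtime section,
+ * per RAS_SEC_DESC_OFFSET() and RAS_NONSTD_SEC_OFFSET() above.
+ */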
+
+static int cper_generate_fatal_record(struct ras_core_context *ras_core,
+ uint8_t *buffer, struct ras_log_info **trace_arr, uint32_t arr_num)
+{
+ struct ras_cper_fatal_record record = {0};
+ int i = 0;
+
+ for (i = 0; i < arr_num; i++) {
+ fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL,
+ RAS_CPER_SEV_FATAL_UE, trace_arr[i]);
+ record.hdr.record_length = RAS_HDR_LEN + RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN;
+ record.hdr.sec_cnt = 1;
+
+ fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE,
+ CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal),
+ sizeof(struct cper_section_fatal));
+
+ fill_section_fatal(ras_core, &record.fatal, trace_arr[i]);
+
+ memcpy(buffer + (i * record.hdr.record_length),
+ &record, record.hdr.record_length);
+ }
+
+ return 0;
+}
+
+static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count)
+{
+ int size = 0;
+
+ size += RAS_HDR_LEN;
+ size += (RAS_SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case RAS_CPER_TYPE_RUNTIME:
+ case RAS_CPER_TYPE_RMA:
+ size += (RAS_NONSTD_SEC_LEN * section_count);
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ size += (RAS_FATAL_SEC_LEN * section_count);
+ size += (RAS_HDR_LEN * (section_count - 1));
+ break;
+ case RAS_CPER_TYPE_BOOT:
+ size += (RAS_BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ /* should never reach here */
+ break;
+ }
+
+ return size;
+}
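+
+/*
+ * Worked example (editorial illustration): a RAS_CPER_TYPE_RUNTIME
+ * record with section_count == 2 is RAS_HDR_LEN + 2 * RAS_SEC_DESC_LEN
+ * + 2 * RAS_NONSTD_SEC_LEN bytes. RAS_CPER_TYPE_FATAL adds one extra
+ * RAS_HDR_LEN per section beyond the first, because
+ * cper_generate_fatal_record() emits each section as a self-contained
+ * record.
+ */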
+
+static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event)
+{
+ switch (event) {
+ case RAS_LOG_EVENT_UE:
+ return RAS_CPER_TYPE_FATAL;
+ case RAS_LOG_EVENT_DE:
+ case RAS_LOG_EVENT_CE:
+ case RAS_LOG_EVENT_POISON_CREATION:
+ case RAS_LOG_EVENT_POISON_CONSUMPTION:
+ return RAS_CPER_TYPE_RUNTIME;
+ case RAS_LOG_EVENT_RMA:
+ return RAS_CPER_TYPE_RMA;
+ default:
+ /* should never reach here */
+ return RAS_CPER_TYPE_RUNTIME;
+ }
+}
+
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len)
+{
+ uint8_t *buffer = buf;
+ uint64_t buf_size = buf_len;
+ int record_size, saved_size = 0;
+ struct cper_section_hdr *hdr;
+
+ /* All the batch traces share the same event */
+ record_size = cper_get_record_size(
+ cper_ras_log_event_to_cper_type(trace_list[0]->event), count);
+
+ if ((record_size + saved_size) > buf_size)
+ return -ENOMEM;
+
+ hdr = (struct cper_section_hdr *)(buffer + saved_size);
+
+ switch (trace_list[0]->event) {
+ case RAS_LOG_EVENT_RMA:
+ cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA);
+ break;
+ case RAS_LOG_EVENT_DE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE);
+ break;
+ case RAS_LOG_EVENT_CE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE);
+ break;
+ case RAS_LOG_EVENT_UE:
+ cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count);
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event);
+ break;
+ }
+
+ saved_size += record_size;
+
+ *real_data_len = saved_size;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
new file mode 100644
index 000000000000..076c1883c1ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_CPER_H__
+#define __RAS_CPER_H__
+
+#define CPER_UUID_MAX_SIZE 16
+struct ras_cper_guid {
+ uint8_t b[CPER_UUID_MAX_SIZE];
+};
+
+#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ ((struct ras_cper_guid) \
+ {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
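+
+/*
+ * Note (editorial): the first three GUID fields are serialized
+ * little-endian and the trailing eight bytes are kept verbatim, so
+ * CPER_GUID__INIT(0xE8F56FFE, ...) begins with bytes FE 6F F5 E8 in
+ * memory, matching the UEFI GUID wire format.
+ */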
+
+#define CPER_HDR__REV_1 (0x100)
+#define CPER_SEC__MINOR_REV_1 (0x01)
+#define CPER_SEC__MAJOR_REV_22 (0x22)
+#define CPER_OAM_MAX_COUNT (8)
+
+#define CPER_CTX_TYPE__CRASH (1)
+#define CPER_CTX_TYPE__BOOT (9)
+
+#define CPER_CREATOR_ID__AMDGPU "amdgpu"
+
+#define CPER_NOTIFY__MCE \
+ CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+#define CPER_NOTIFY__CMC \
+ CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+#define BOOT__TYPE \
+ CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+
+#define GPU__CRASHDUMP \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \
+ 0x72, 0x5F, 0xD6, 0xAE)
+#define GPU__NONSTANDARD_ERROR \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \
+ 0x17, 0x80, 0x55, 0x1D)
+#define PROC_ERR__SECTION_TYPE \
+ CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+
+enum ras_cper_type {
+ RAS_CPER_TYPE_RUNTIME,
+ RAS_CPER_TYPE_FATAL,
+ RAS_CPER_TYPE_BOOT,
+ RAS_CPER_TYPE_RMA,
+};
+
+enum ras_cper_severity {
+ RAS_CPER_SEV_NON_FATAL_UE = 0,
+ RAS_CPER_SEV_FATAL_UE = 1,
+ RAS_CPER_SEV_NON_FATAL_CE = 2,
+ RAS_CPER_SEV_RMA = 3,
+
+ RAS_CPER_SEV_UNUSED = 10,
+};
+
+enum ras_cper_aca_reg {
+ RAS_CPER_ACA_REG_CTL = 0,
+ RAS_CPER_ACA_REG_STATUS = 1,
+ RAS_CPER_ACA_REG_ADDR = 2,
+ RAS_CPER_ACA_REG_MISC0 = 3,
+ RAS_CPER_ACA_REG_CONFIG = 4,
+ RAS_CPER_ACA_REG_IPID = 5,
+ RAS_CPER_ACA_REG_SYND = 6,
+ RAS_CPER_ACA_REG_DESTAT = 8,
+ RAS_CPER_ACA_REG_DEADDR = 9,
+ RAS_CPER_ACA_REG_MASK = 10,
+
+ RAS_CPER_ACA_REG_COUNT = 16,
+};
+
+#pragma pack(push, 1)
+
+struct ras_cper_timestamp {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t hours;
+ uint8_t flag;
+ uint8_t day;
+ uint8_t month;
+ uint8_t year;
+ uint8_t century;
+};
+
+struct cper_section_hdr {
+ char signature[4]; /* "CPER" */
+ uint16_t revision;
+ uint32_t signature_end; /* 0xFFFFFFFF */
+ uint16_t sec_cnt;
+ enum ras_cper_severity error_severity;
+ union {
+ struct {
+ uint32_t platform_id : 1;
+ uint32_t timestamp : 1;
+ uint32_t partition_id : 1;
+ uint32_t reserved : 29;
+ } valid_bits;
+ uint32_t valid_mask;
+ };
+ uint32_t record_length; /* Total size of CPER Entry */
+ struct ras_cper_timestamp timestamp;
+ char platform_id[16];
+ struct ras_cper_guid partition_id; /* Reserved */
+ char creator_id[16];
+ struct ras_cper_guid notify_type; /* CMC, MCE */
+ char record_id[8]; /* Unique CPER Entry ID */
+ uint32_t flags; /* Reserved */
+ uint64_t persistence_info; /* Reserved */
+ uint8_t reserved[12]; /* Reserved */
+};
+
+struct cper_section_descriptor {
+ uint32_t sec_offset; /* Offset from the start of CPER entry */
+ uint32_t sec_length;
+ uint8_t revision_minor; /* CPER_SEC__MINOR_REV_1 */
+ uint8_t revision_major; /* CPER_SEC__MAJOR_REV_22 */
+ union {
+ struct {
+ uint8_t fru_id : 1;
+ uint8_t fru_text : 1;
+ uint8_t reserved : 6;
+ } valid_bits;
+ uint8_t valid_mask;
+ };
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t primary : 1;
+ uint32_t reserved1 : 2;
+ uint32_t exceed_err_threshold : 1;
+ uint32_t latent_err : 1;
+ uint32_t reserved2 : 27;
+ } flag_bits;
+ uint32_t flag_mask;
+ };
+ struct ras_cper_guid sec_type;
+ char fru_id[16];
+ enum ras_cper_severity severity;
+ char fru_text[20];
+};
+
+struct runtime_hdr {
+ union {
+ struct {
+ uint64_t apic_id : 1;
+ uint64_t fw_id : 1;
+ uint64_t err_info_cnt : 6;
+ uint64_t err_context_cnt : 6;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ uint64_t apic_id;
+ char fw_id[48];
+};
+
+struct runtime_descriptor {
+ struct ras_cper_guid error_type;
+ union {
+ struct {
+ uint64_t ms_chk : 1;
+ uint64_t target_addr_id : 1;
+ uint64_t req_id : 1;
+ uint64_t resp_id : 1;
+ uint64_t instr_ptr : 1;
+ uint64_t reserved : 59;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ union {
+ struct {
+ uint64_t err_type_valid : 1;
+ uint64_t pcc_valid : 1;
+ uint64_t uncorr_valid : 1;
+ uint64_t precise_ip_valid : 1;
+ uint64_t restartable_ip_valid : 1;
+ uint64_t overflow_valid : 1;
+ uint64_t reserved1 : 10;
+ uint64_t err_type : 2;
+ uint64_t pcc : 1;
+ uint64_t uncorr : 1;
+ uint64_t precised_ip : 1;
+ uint64_t restartable_ip : 1;
+ uint64_t overflow : 1;
+ uint64_t reserved2 : 41;
+ } ms_chk_bits;
+ uint64_t ms_chk_mask;
+ };
+ uint64_t target_addr_id;
+ uint64_t req_id;
+ uint64_t resp_id;
+ uint64_t instr_ptr;
+};
+
+struct runtime_error_reg {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t msr_addr;
+ uint64_t mm_reg_addr;
+ uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT];
+};
+
+struct cper_section_runtime {
+ struct runtime_hdr hdr;
+ struct runtime_descriptor descriptor;
+ struct runtime_error_reg reg;
+};
+
+struct crashdump_hdr {
+ uint64_t reserved1;
+ uint64_t reserved2;
+ char fw_id[48];
+ uint64_t reserved3[8];
+};
+
+struct fatal_reg_info {
+ uint64_t status;
+ uint64_t addr;
+ uint64_t ipid;
+ uint64_t synd;
+};
+
+struct crashdump_fatal {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ struct fatal_reg_info reg;
+};
+
+struct crashdump_boot {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ uint64_t msg[CPER_OAM_MAX_COUNT];
+};
+
+struct cper_section_fatal {
+ struct crashdump_hdr hdr;
+ struct crashdump_fatal data;
+};
+
+struct cper_section_boot {
+ struct crashdump_hdr hdr;
+ struct crashdump_boot data;
+};
+
+struct ras_cper_fatal_record {
+ struct cper_section_hdr hdr;
+ struct cper_section_descriptor descriptor;
+ struct cper_section_fatal fatal;
+};
+#pragma pack(pop)
+
+#define RAS_HDR_LEN (sizeof(struct cper_section_hdr))
+#define RAS_SEC_DESC_LEN (sizeof(struct cper_section_descriptor))
+
+#define RAS_BOOT_SEC_LEN (sizeof(struct cper_section_boot))
+#define RAS_FATAL_SEC_LEN (sizeof(struct cper_section_fatal))
+#define RAS_NONSTD_SEC_LEN (sizeof(struct cper_section_runtime))
+
+#define RAS_SEC_DESC_OFFSET(idx) (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (idx)))
+
+#define RAS_BOOT_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_BOOT_SEC_LEN * (idx)))
+#define RAS_FATAL_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_FATAL_SEC_LEN * (idx)))
+#define RAS_NONSTD_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_NONSTD_SEC_LEN * (idx)))
+
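+/*
+ * Worked example (editorial illustration): for a runtime record with
+ * count == 3 sections, descriptor 1 sits at RAS_SEC_DESC_OFFSET(1) =
+ * RAS_HDR_LEN + RAS_SEC_DESC_LEN, while its payload sits at
+ * RAS_NONSTD_SEC_OFFSET(3, 1) = RAS_HDR_LEN + 3 * RAS_SEC_DESC_LEN +
+ * RAS_NONSTD_SEC_LEN.
+ */
+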
+struct ras_core_context;
+struct ras_log_info;
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
new file mode 100644
index 000000000000..cd6b057bdaf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras_eeprom.h"
+#include "ras.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
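+
+/*
+ * Worked example (editorial illustration): MAKE_I2C_ADDR(0x40000) =
+ * (0xA << 3) | 0x4 = 0x54, the device holding EEPROM memory address
+ * 40000h in the scheme described above.
+ */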
+
+/*
+ * The two macros below give the actual size in bytes that these
+ * entities occupy in the EEPROM memory.
+ * RAS_TABLE_RECORD_SIZE differs from sizeof(struct eeprom_umc_record),
+ * which uses uint64 to store 6-byte fields such as retired_row_pfn.
+ */
+#define RAS_TABLE_HEADER_SIZE 20
+#define RAS_TABLE_RECORD_SIZE 24
+
+/* Table hdr is 'AMDR' */
+#define RAS_TABLE_HDR_VAL 0x414d4452
+
+/* Bad GPU tag 'BADG' */
+#define RAS_TABLE_HDR_BAD 0x42414447
+
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* Assume 2-Mbit size EEPROM and take up the whole space. */
+#define RAS_TBL_SIZE_BYTES (256 * 1024)
+#define RAS_TABLE_START 0
+#define RAS_HDR_START RAS_TABLE_START
+#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE RAS INFO |
+ * | (available info size 4 Bytes) |
+ * | ( reserved size 252 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+ RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell I2C layer to read
+ * from/write to there. _N is the so called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+ (_N) * RAS_TABLE_RECORD_SIZE)
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+ (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
+ */
+#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \
+ (_C)->ras_max_record_count)
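+
+/*
+ * Worked example (editorial illustration): with ras_fri == 250 and
+ * ras_max_record_count == 256, relative record index 10 maps to
+ * absolute index (10 + 250) % 256 == 4, wrapping past the table end.
+ */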
+
+#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom))
+
+static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_eeprom_supported;
+}
+
+static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core,
+ struct ras_eeprom_control *control)
+{
+ int ret = -EINVAL;
+
+ if (control->sys_func &&
+ control->sys_func->update_eeprom_i2c_config)
+ ret = control->sys_func->update_eeprom_i2c_config(ras_core);
+ else
+ RAS_DEV_WARN(ras_core->dev,
+ "No eeprom i2c system config!\n");
+
+ return !ret;
+}
+
+static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int ret;
+
+ if (control->sys_func && control->sys_func->eeprom_i2c_xfer) {
+ ret = control->sys_func->eeprom_i2c_xfer(ras_core,
+ eeprom_addr, eeprom_buf, buf_size, read);
+
+ if ((ret > 0) && !read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+
+ return ret;
+ }
+
+ RAS_DEV_ERR(ras_core->dev, "Error: No eeprom i2c system xfer function!\n");
+ return -EINVAL;
+}
+
+static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ u16 limit;
+ u16 ps; /* Partial size */
+ int res = 0, r;
+
+ if (read)
+ limit = ras_core->ras_eeprom.max_read_len;
+ else
+ limit = ras_core->ras_eeprom.max_write_len;
+
+ if (limit && (limit <= EEPROM_OFFSET_SIZE)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
+ eeprom_addr, buf_size,
+ read ? "read" : "write", EEPROM_OFFSET_SIZE);
+ return -EINVAL;
+ }
+
+ ras_core_down_gpu_reset_lock(ras_core);
+
+ if (limit == 0) {
+ res = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, buf_size, read);
+ } else {
+ /* The "limit" includes all data bytes sent/received,
+ * which would include the EEPROM_OFFSET_SIZE bytes.
+ * Account for them here.
+ */
+ limit -= EEPROM_OFFSET_SIZE;
+ for ( ; buf_size > 0;
+ buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+ ps = (buf_size < limit) ? buf_size : limit;
+
+ r = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, ps, read);
+ if (r < 0)
+ break;
+
+ res += r;
+ }
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return res;
+}
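+
+/*
+ * Worked example (editorial illustration): with max_read_len == 32 the
+ * usable payload per transfer is 32 - EEPROM_OFFSET_SIZE = 30 bytes,
+ * so a 100-byte read above is issued as chunks of 30, 30, 30 and 10
+ * bytes, advancing the EEPROM address between chunks.
+ */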
+
+static int __eeprom_read(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, true);
+}
+
+static int __eeprom_write(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, false);
+}
+
+static void
+__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ pp[0] = cpu_to_le32(hdr->header);
+ pp[1] = cpu_to_le32(hdr->version);
+ pp[2] = cpu_to_le32(hdr->first_rec_offset);
+ pp[3] = cpu_to_le32(hdr->tbl_size);
+ pp[4] = cpu_to_le32(hdr->checksum);
+}
+
+static void
+__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ hdr->header = le32_to_cpu(pp[0]);
+ hdr->version = le32_to_cpu(pp[1]);
+ hdr->first_rec_offset = le32_to_cpu(pp[2]);
+ hdr->tbl_size = le32_to_cpu(pp[3]);
+ hdr->checksum = le32_to_cpu(pp[4]);
+}
+
+static int __write_table_header(struct ras_eeprom_control *control)
+{
+ u8 buf[RAS_TABLE_HEADER_SIZE];
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int res;
+
+ memset(buf, 0, sizeof(buf));
+ __encode_table_header_to_buf(&control->tbl_hdr, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table header:%d\n", res);
+ } else if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static void
+__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = ((uint32_t)(rai->rma_status) & 0xFF) |
+ (((uint32_t)(rai->health_percent) << 8) & 0xFF00) |
+ (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000);
+ pp[0] = cpu_to_le32(tmp);
+}
+
+static void
+__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = le32_to_cpu(pp[0]);
+ rai->rma_status = tmp & 0xFF;
+ rai->health_percent = (tmp >> 8) & 0xFF;
+ rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF;
+}
+
+static int __write_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u8 *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to write table ras info\n");
+ return -ENOMEM;
+ }
+
+ __encode_table_ras_info_to_buf(&control->tbl_rai, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table ras info:%d\n", res);
+ } else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ kfree(buf);
+
+ return res;
+}
+
+static u8 __calc_hdr_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ /* Header checksum, skip checksum field in the calculation */
+ sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+ pp = (u8 *) &control->tbl_hdr;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ sz = sizeof(control->tbl_rai);
+ pp = (u8 *) &control->tbl_rai;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static int ras_eeprom_correct_header_tag(
+ struct ras_eeprom_control *control,
+ uint32_t header)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ u8 *hh;
+ int res;
+ u8 csum;
+
+ csum = -hdr->checksum;
+
+ hh = (void *) &hdr->header;
+ csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+ hh = (void *) &header;
+ csum += hh[0] + hh[1] + hh[2] + hh[3];
+ csum = -csum;
+ mutex_lock(&control->ras_tbl_mutex);
+ hdr->header = header;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
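+
+/*
+ * Worked example (editorial illustration): the checksum is chosen so
+ * that all header (and, for v2.1+, ras-info) bytes sum to 0 mod 256.
+ * Retagging from 'AMDR' to 'BADG' above therefore only subtracts the
+ * old tag bytes and adds the new ones into the running sum, avoiding
+ * a full table re-read.
+ */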
+
+static void ras_set_eeprom_table_version(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ hdr->version = RAS_TABLE_VER_V3;
+}
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ u8 csum;
+ int res;
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ hdr->header = RAS_TABLE_HDR_VAL;
+ ras_set_eeprom_table_version(control);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = RAS_GPU_HEALTH_USABLE;
+ /*
+ * GPU health represented as a percentage:
+ * 0 means worst health, 100 means fully healthy.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = control->record_threshold_count;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+
+ control->ras_num_recs = 0;
+ control->ras_fri = 0;
+
+ control->bad_channel_bitmap = 0;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+ control->update_channel_flag = false;
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void
+__encode_table_record_to_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to the EEPROM page spec, in LE format */
+ buf[i++] = record->err_type;
+
+ buf[i++] = record->bank;
+
+ tmp = cpu_to_le64(record->ts);
+ memcpy(buf + i, &tmp, 8);
+ i += 8;
+
+ tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+ i += 6;
+
+ buf[i++] = record->mem_channel;
+ buf[i++] = record->mcumc_id;
+
+ tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+}
+
+static void
+__decode_table_record_from_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to the EEPROM page spec, in LE format */
+ record->err_type = buf[i++];
+
+ record->bank = buf[i++];
+
+ memcpy(&tmp, buf + i, 8);
+ record->ts = le64_to_cpu(tmp);
+ i += 8;
+
+ memcpy(&tmp, buf + i, 6);
+ record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+ i += 6;
+
+ record->mem_channel = buf[i++];
+ record->mcumc_id = buf[i++];
+
+ memcpy(&tmp, buf + i, 6);
+ record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ bool ret = false;
+ int bad_page_count;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return false;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) {
+ if (bad_page_count > control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d",
+ bad_page_count, control->record_threshold_count);
+
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
+ ret = false;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consider adjusting the customized threshold.\n");
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * __ras_eeprom_write -- write indexed from buffer to EEPROM
+ * @control: pointer to control structure
+ * @buf: pointer to buffer containing data to write
+ * @fri: start writing at this index
+ * @num: number of records to write
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_write(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_write(ras_core,
+ control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Writing %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short write, return error.*/
+ RAS_DEV_ERR(ras_core->dev,
+ "Wrote %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static int ras_eeprom_append_table(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ const u32 num)
+{
+ u32 a, b, i;
+ u8 *buf, *pp;
+ int res;
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Encode all of them in one go.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __encode_table_record_to_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+
+ /* a, first record index to write into.
+ * b, last record index to write into.
+ * a = first index to read (fri) + number of records in the table,
+ * b = a + @num - 1.
+ * Let N = control->ras_max_record_count, then we have,
+ * case 0: 0 <= a <= b < N,
+ * just append @num records starting at a;
+ * case 1: 0 <= a < N <= b,
+ * append (N - a) records starting at a, and
+ * append the remainder, b % N + 1, starting at 0.
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases,
+ * case 2a: 0 <= a <= b < N
+ * append num records starting at a; and fix fri if b overwrote it,
+ * and since a <= b, if b overwrote it then a must've also,
+ * and if b didn't overwrite it, then a didn't also.
+ * case 2b: 0 <= b < a < N
+ * write num records starting at a, which wraps around 0=N
+ * and overwrite fri unconditionally. Now from case 2a,
+ * this means that b eclipsed fri to overwrite it and wrap
+ * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally
+ * set fri = b + 1 (mod N).
+ * Now, since fri is updated in every case, except the trivial case 0,
+ * the number of records present in the table after writing, is,
+ * num_recs - 1 = b - fri (mod N), and we take the positive value,
+ * by adding an arbitrary multiple of N before taking the modulo N
+ * as shown below.
+ */
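+ /* Worked example (editorial illustration): with N == 4, ras_fri == 1,
+ * ras_num_recs == 3 and num == 2, a == 4 and b == 5, so both wrap:
+ * a %= 4 -> 0, b %= 4 -> 1, the records land at indices 0 and 1,
+ * ras_fri advances to 2, and ras_num_recs becomes
+ * 1 + (4 + 1 - 2) % 4 == 4, a full table.
+ */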
+ a = control->ras_fri + control->ras_num_recs;
+ b = a + num - 1;
+ if (b < control->ras_max_record_count) {
+ res = __ras_eeprom_write(control, buf, a, num);
+ } else if (a < control->ras_max_record_count) {
+ u32 g0, g1;
+
+ g0 = control->ras_max_record_count - a;
+ g1 = b % control->ras_max_record_count + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ if (g1 > control->ras_fri)
+ control->ras_fri = g1 % control->ras_max_record_count;
+ } else {
+ a %= control->ras_max_record_count;
+ b %= control->ras_max_record_count;
+
+ if (a <= b) {
+ /* Note that, b - a + 1 = num. */
+ res = __ras_eeprom_write(control, buf, a, num);
+ if (res)
+ goto Out;
+ if (b >= control->ras_fri)
+ control->ras_fri = (b + 1) % control->ras_max_record_count;
+ } else {
+ u32 g0, g1;
+
+ /* b < a, which means, we write from
+ * a to the end of the table, and from
+ * the start of the table to b.
+ */
+ g0 = control->ras_max_record_count - a;
+ g1 = b + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ control->ras_fri = g1 % control->ras_max_record_count;
+ }
+ }
+ control->ras_num_recs = 1 +
+ (control->ras_max_record_count + b - control->ras_fri)
+ % control->ras_max_record_count;
+Out:
+ kfree(buf);
+ return res;
+}
+
+static int ras_eeprom_update_header(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int threshold_config = control->record_threshold_config;
+ u8 *buf, *pp, csum;
+ u32 buf_size;
+ int bad_page_count;
+ int res;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ /* Modify the header if it exceeds.
+ */
+ if (threshold_config != 0 &&
+ bad_page_count > control->record_threshold_count) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ bad_page_count, control->record_threshold_count);
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
+
+ if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) &&
+ (threshold_config != NONSTOP_OVER_THRESHOLD))
+ ras_core->is_rma = true;
+
+ /* ignore a not-supported return value */
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL);
+ }
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ control->tbl_hdr.checksum = 0;
+
+ buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
+ res = -ENOMEM;
+ goto Out;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_record_offset,
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM failed reading records:%d\n", res);
+ goto Out;
+ } else if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM read %d out of %d bytes\n", res, buf_size);
+ res = -EIO;
+ goto Out;
+ }
+
+ /*
+ * Bad page records have been stored in eeprom;
+ * now calculate the gpu health percent.
+ */
+ if (threshold_config != 0 &&
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ bad_page_count <= control->record_threshold_count)
+ control->tbl_rai.health_percent = ((control->record_threshold_count -
+ bad_page_count) * 100) / control->record_threshold_count;
+
+ /* Recalc the checksum.
+ */
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+
+ csum += __calc_hdr_byte_sum(control);
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ /* avoid sign extension when assigning to "checksum" */
+ csum = -csum;
+ control->tbl_hdr.checksum = csum;
+ res = __write_table_header(control);
+ if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+Out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * ras_eeprom_append -- append records to the EEPROM RAS table
+ * @control: pointer to control structure
+ * @record: array of records to append
+ * @num: number of records in @record array
+ *
+ * Append @num records to the table, calculate the checksum and write
+ * the table back to EEPROM. @num must be at least 1, and the total
+ * number of records after appending must not exceed
+ * control->ras_max_record_count.
+ *
+ * Return 0 on success or if EEPROM is not supported, -errno on error.
+ */
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int res;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n");
+ return -EINVAL;
+ } else if ((num + control->ras_num_recs) > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = ras_eeprom_append_table(control, record, num);
+ if (!res)
+ res = ras_eeprom_update_header(control);
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+/**
+ * __ras_eeprom_read -- read indexed records from EEPROM into a buffer
+ * @control: pointer to control structure
+ * @buf: pointer to buffer to read into
+ * @fri: first record index; start reading at this absolute index
+ * @num: number of records to read
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_read(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* I2C may be unstable during GPU reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Reading %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short read, return error.
+ */
+ RAS_DEV_ERR(ras_core->dev,
+ "Read %d records out of %d\n",
+ (res / RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int i, res;
+ u8 *buf, *pp;
+ u32 g0, g1;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n");
+ return -EINVAL;
+ } else if (num > control->ras_num_recs) {
+ RAS_DEV_ERR(ras_core->dev,
+ "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
+ return -EINVAL;
+ }
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Determine how many records to read, from the first record
+ * index, fri, to the end of the table, and from the beginning
+ * of the table, such that the total number of records is
+ * @num, and we handle wrap around when fri > 0 and
+ * fri + num > control->ras_max_record_count.
+ *
+ * First we compute the index of the last element
+ * which would be fetched from each region,
+ * g0 is in [fri, fri + num - 1], and
+ * g1 is in [0, control->ras_max_record_count - 1].
+ * Then, if g0, the index of the last element to fetch,
+ * is less than control->ras_max_record_count, we set g0
+ * to _the number_ of elements to fetch, @num, since we
+ * know that the last index to be fetched does not exceed
+ * the table.
+ *
+ * If, however, g0 >= control->ras_max_record_count, then
+ * we set g0 to the number of elements to read
+ * until the end of the table, and g1 to the number of
+ * elements to read from the beginning of the table.
+ */
+ g0 = control->ras_fri + num - 1;
+ g1 = g0 % control->ras_max_record_count;
+ if (g0 < control->ras_max_record_count) {
+ g0 = num;
+ g1 = 0;
+ } else {
+ g0 = control->ras_max_record_count - control->ras_fri;
+ g1 += 1;
+ }
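+ /* Worked example (illustrative numbers): with ras_fri = 4,
+ * num = 3 and ras_max_record_count = 5, g0 = 4 + 3 - 1 = 6 >= 5,
+ * so we read g0 = 5 - 4 = 1 record from index 4 and
+ * g1 = (6 % 5) + 1 = 2 records from index 0, num = 3 in total.
+ */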
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = __ras_eeprom_read(control, buf, control->ras_fri, g0);
+ if (res)
+ goto Out;
+ if (g1) {
+ res = __ras_eeprom_read(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ }
+
+ res = 0;
+
+ /* All records read; now decode them into @record.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __decode_table_record_from_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+Out:
+ kfree(buf);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+
+ /* Determine the EEPROM table version before the table is initialized */
+ ras_set_eeprom_table_version(control);
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ return RAS_MAX_RECORD_COUNT_V2_1;
+ else
+ return RAS_MAX_RECORD_COUNT;
+}
+
+/**
+ * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum
+ * @control: pointer to control structure
+ *
+ * Check the checksum of the RAS table stored in EEPROM.
+ *
+ * Return 0 if the checksum is correct,
+ * positive if it is not correct, and
+ * -errno on I/O error.
+ */
+static int __verify_ras_table_checksum(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int buf_size, res;
+ u8 csum, *buf, *pp;
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Out of memory checking RAS table checksum.\n");
+ return -ENOMEM;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, buf_size);
+ if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Partial read for checksum, res:%d\n", res);
+ /* On partial reads, return -EIO.
+ */
+ if (res >= 0)
+ res = -EIO;
+ goto Out;
+ }
+
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
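+ /* The update path stores checksum = -(sum of all other bytes),
+ * so a correct table sums to 0 modulo 256 here; any non-zero
+ * csum indicates corruption.
+ */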
+Out:
+ kfree(buf);
+ return res < 0 ? res : csum;
+}
+
+static int __read_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ unsigned char *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
+ return -ENOMEM;
+ }
+
+ /* EEPROM table V2_1 and later carry a RAS info section;
+ * read it from EEPROM.
+ */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table ras info, res:%d\n", res);
+ res = res >= 0 ? -EIO : res;
+ goto Out;
+ }
+
+ __decode_table_ras_info_from_buf(rai, buf);
+
+Out:
+ kfree(buf);
+ return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
+}
+
+static int __check_ras_table_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
+ struct ras_eeprom_table_header *hdr;
+ int res;
+
+ hdr = &control->tbl_hdr;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ control->ras_header_offset = RAS_HDR_START;
+ control->ras_info_offset = RAS_TABLE_V2_1_INFO_START;
+ mutex_init(&control->ras_tbl_mutex);
+
+ /* Read the table header from EEPROM address */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+ if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table header, res:%d\n", res);
+ return res >= 0 ? -EIO : res;
+ }
+
+ __decode_table_header_from_buf(hdr, buf);
+
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table");
+ return ras_eeprom_reset_table(ras_core);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
+ control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ break;
+ case RAS_TABLE_VER_V1:
+ control->ras_num_recs = RAS_NUM_RECS(hdr);
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
+ }
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+
+ return 0;
+}
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr;
+ int bad_page_count;
+ int res = 0;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ hdr = &control->tbl_hdr;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (hdr->header == RAS_TABLE_HDR_VAL) {
+ RAS_DEV_INFO(ras_core->dev,
+ "Found existing EEPROM table with %d records\n",
+ bad_page_count);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS table incorrect checksum or error:%d\n", res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
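+ /* The comparison below uses 10 * count >= 9 * threshold so the
+ * 90% check stays in integer arithmetic.
+ */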
+ if (10 * bad_page_count >= 9 * control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS records:%u exceeds 90%% of threshold:%d\n",
+ bad_page_count,
+ control->record_threshold_count);
+
+ } else if (hdr->header == RAS_TABLE_HDR_BAD &&
+ control->record_threshold_config != 0) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS Table incorrect checksum or error:%d\n", res);
+
+ if (control->record_threshold_count >= bad_page_count) {
+ /* The threshold was increased since the last time the
+ * system was booted, so now
+ * control->record_threshold_count - bad_page_count >= 0,
+ * and at least one more record can be saved before
+ * the page count threshold is reached.
+ */
+ RAS_DEV_INFO(ras_core->dev,
+ "records:%d threshold:%d, resetting RAS table header signature",
+ bad_page_count,
+ control->record_threshold_count);
+ res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d",
+ bad_page_count, control->record_threshold_count);
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ res = 0;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_ERR(ras_core->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
+ }
+ }
+ }
+
+ return res < 0 ? res : 0;
+}
+
+int ras_eeprom_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+ struct ras_eeprom_config *eeprom_cfg;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ ras_core->is_rma = false;
+
+ control = &ras_core->ras_eeprom;
+
+ memset(control, 0, sizeof(*control));
+
+ eeprom_cfg = &ras_core->config->eeprom_cfg;
+ control->record_threshold_config =
+ eeprom_cfg->eeprom_record_threshold_config;
+
+ control->record_threshold_count = ras_eeprom_max_record_count(ras_core);
+ if (eeprom_cfg->eeprom_record_threshold_count <
+ control->record_threshold_count)
+ control->record_threshold_count =
+ eeprom_cfg->eeprom_record_threshold_count;
+
+ control->sys_func = eeprom_cfg->eeprom_sys_fn;
+ control->max_read_len = eeprom_cfg->max_i2c_read_len;
+ control->max_write_len = eeprom_cfg->max_i2c_write_len;
+ control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter;
+ control->i2c_port = eeprom_cfg->eeprom_i2c_port;
+ control->i2c_address = eeprom_cfg->eeprom_i2c_addr;
+
+ control->update_channel_flag = false;
+
+ return __check_ras_table_status(ras_core);
+}
+
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ control = &ras_core->ras_eeprom;
+ mutex_destroy(&control->ras_tbl_mutex);
+
+ return 0;
+}
+
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return 0;
+
+ return ras_core->ras_eeprom.ras_num_recs;
+}
+
+void ras_eeprom_sync_info(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return;
+
+ control = &ras_core->ras_eeprom;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+}
+
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return RAS_GPU_HEALTH_NONE;
+
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD)
+ return RAS_GPU_IN_BAD_STATUS;
+
+ return rai->rma_status;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
new file mode 100644
index 000000000000..2abe566c18b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_EEPROM_H__
+#define __RAS_EEPROM_H__
+#include "ras_sys.h"
+
+#define RAS_TABLE_VER_V1 0x00010000
+#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
+
+#define NONSTOP_OVER_THRESHOLD -2
+#define WARN_NONSTOP_OVER_THRESHOLD -1
+#define DISABLE_RETIRE_PAGE 0
+
+/*
+ * Bad page PFN: eeprom_umc_record.retired_row_pfn[39:0];
+ * NPS mode: eeprom_umc_record.retired_row_pfn[47:40].
+ */
+#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL
+#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL
+#define EEPROM_RECORD_UMC_NPS_SHIFT 40
+
+#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \
+ (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \
+ EEPROM_RECORD_UMC_NPS_SHIFT)
+
+#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \
+ ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK)
+
+#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \
+do { \
+ uint64_t tmp = (NPS); \
+ tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \
+ tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \
+ (RECORD)->retired_row_pfn = tmp; \
+} while (0)
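+
+/* Usage sketch (illustrative values): packing ADDR = 0x12345 with
+ * NPS = 4 yields retired_row_pfn = 0x040000012345, from which
+ * EEPROM_RECORD_UMC_ADDR_PFN() recovers 0x12345 and
+ * EEPROM_RECORD_UMC_NPS_MODE() recovers 4.
+ */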
+
+enum ras_gpu_health_status {
+ RAS_GPU_HEALTH_NONE = 0,
+ RAS_GPU_HEALTH_USABLE = 1,
+ RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2,
+ RAS_GPU_IN_BAD_STATUS = 3,
+};
+
+enum ras_eeprom_err_type {
+ RAS_EEPROM_ERR_NA,
+ RAS_EEPROM_ERR_RECOVERABLE,
+ RAS_EEPROM_ERR_NON_RECOVERABLE,
+ RAS_EEPROM_ERR_COUNT,
+};
+
+struct ras_eeprom_table_header {
+ uint32_t header;
+ uint32_t version;
+ uint32_t first_rec_offset;
+ uint32_t tbl_size;
+ uint32_t checksum;
+} __packed;
+
+struct ras_eeprom_table_ras_info {
+ u8 rma_status;
+ u8 health_percent;
+ u16 ecc_page_threshold;
+ u32 padding[64 - 1];
+} __packed;
+
+struct ras_eeprom_control {
+ struct ras_eeprom_table_header tbl_hdr;
+ struct ras_eeprom_table_ras_info tbl_rai;
+
+ /* record threshold */
+ int record_threshold_config;
+ uint32_t record_threshold_count;
+ bool update_channel_flag;
+
+ const struct ras_eeprom_sys_func *sys_func;
+ void *i2c_adapter;
+ u32 i2c_port;
+ u16 max_read_len;
+ u16 max_write_len;
+
+ /* Base I2C EEPROM 19-bit memory address,
+ * where the table is located. For more information,
+ * see top of amdgpu_eeprom.c.
+ */
+ u32 i2c_address;
+
+ /* The byte offset from @i2c_address
+ * where the table header is found,
+ * and where the records start, always
+ * right after the header.
+ */
+ u32 ras_header_offset;
+ u32 ras_info_offset;
+ u32 ras_record_offset;
+
+ /* Number of records in the table.
+ */
+ u32 ras_num_recs;
+
+ /* First record index to read, 0-based.
+ * Range is [0, num_recs-1]. This is
+ * an absolute index, starting right after
+ * the table header.
+ */
+ u32 ras_fri;
+
+ /* Maximum possible number of records
+ * we could store, i.e. the maximum capacity
+ * of the table.
+ */
+ u32 ras_max_record_count;
+
+ /* Protect table access via this mutex.
+ */
+ struct mutex ras_tbl_mutex;
+
+ /* Bitmap of memory channels on which bad pages occurred.
+ */
+ u32 bad_channel_bitmap;
+};
+
+/*
+ * Represents a single table record. Packed so it can be easily
+ * serialized into a byte stream.
+ */
+struct eeprom_umc_record {
+
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+
+ uint64_t retired_row_pfn;
+ uint64_t ts;
+
+ enum ras_eeprom_err_type err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ /* The following fields are not saved to EEPROM.
+ */
+ uint64_t cur_nps_retired_row_pfn;
+ uint32_t cur_nps_bank;
+ uint32_t cur_nps;
+};
+
+struct ras_core_context;
+int ras_eeprom_hw_init(struct ras_core_context *ras_core);
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core);
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core);
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core);
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num);
+
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num);
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core);
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core);
+void ras_eeprom_sync_info(struct ras_core_context *ras_core);
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core);
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
new file mode 100644
index 000000000000..f5ce28777705
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_gfx.h"
+#include "ras_core_status.h"
+
+static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ return &gfx_ras_func_v9_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "GFX ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ return gfx->ip_func->get_ta_subblock(ras_core,
+ error_type, subblock, ta_subblock);
+}
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ gfx->gfx_ip_version = ras_core->config->gfx_ip_version;
+
+ gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version);
+
+ return gfx->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_gfx_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
new file mode 100644
index 000000000000..8a42d69fb0ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_H__
+#define __RAS_GFX_H__
+
+struct ras_gfx_ip_func {
+ int (*get_ta_subblock)(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+};
+
+struct ras_gfx {
+ uint32_t gfx_ip_version;
+ const struct ras_gfx_ip_func *ip_func;
+};
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core);
+int ras_gfx_hw_fini(struct ras_core_context *ras_core);
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
new file mode 100644
index 000000000000..6213d3f125be
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_core_status.h"
+
+enum ta_gfx_v9_subblock {
+ /*CPC*/
+ TA_GFX_V9__GFX_CPC_INDEX_START = 0,
+ TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START,
+ TA_GFX_V9__GFX_CPC_UCODE,
+ TA_GFX_V9__GFX_DC_STATE_ME1,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME1,
+ TA_GFX_V9__GFX_DC_RESTORE_ME1,
+ TA_GFX_V9__GFX_DC_STATE_ME2,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME2,
+ TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF*/
+ TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME1,
+ TA_GFX_V9__GFX_CPF_TAG,
+ TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG,
+ /* CPG*/
+ TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_TAG,
+ TA_GFX_V9__GFX_CPG_TAG,
+ TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG,
+ /* GDS*/
+ TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI*/
+ TA_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ*/
+ TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_LDS_D,
+ TA_GFX_V9__GFX_SQ_LDS_I,
+ TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/
+ TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges)*/
+ TA_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0*/
+ TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START,
+ TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ TA_GFX_V9__GFX_SQC_INDEX0_START,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_INDEX0_END =
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1*/
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX1_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2*/
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX2_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA*/
+ TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_AFIFO,
+ TA_GFX_V9__GFX_TA_FL_LFIFO,
+ TA_GFX_V9__GFX_TA_FX_LFIFO,
+ TA_GFX_V9__GFX_TA_FS_CFIFO,
+ TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA*/
+ TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges)*/
+ TA_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0*/
+ TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1*/
+ TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ TA_GFX_V9__GFX_TCC_INDEX1_END =
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2*/
+ TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ TA_GFX_V9__GFX_TCC_WRITE_RETURN,
+ TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ TA_GFX_V9__GFX_TCC_INDEX2_END =
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3*/
+ TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_INDEX3_END =
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4*/
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX4_END =
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI*/
+ TA_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP*/
+ TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_LFIFO_RAM,
+ TA_GFX_V9__GFX_TCP_CMD_FIFO,
+ TA_GFX_V9__GFX_TCP_VM_FIFO,
+ TA_GFX_V9__GFX_TCP_DB_RAM,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ TA_GFX_V9__GFX_TCP_INDEX_END = TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD*/
+ TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_HI,
+ TA_GFX_V9__GFX_TD_CS_FIFO,
+ TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges)*/
+ TA_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0*/
+ TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_RRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_WRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1*/
+ TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_IORD_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2*/
+ TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D1MEM,
+ TA_GFX_V9__GFX_EA_MAM_D2MEM,
+ TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank*/
+ TA_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker*/
+ TA_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ TA_GFX_V9__GFX_MAX
+};
+
+struct ras_gfx_subblock_t {
+ const char *name;
+ int ta_subblock;
+ int hw_supported_error_type;
+ int sw_supported_error_type;
+};
+
+#define RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
+ [RAS_GFX_V9__##subblock] = { \
+ #subblock, \
+ TA_GFX_V9__##subblock, \
+ ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
+ (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
+ }
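+
+/* Assumption: the bit positions above encode RAS error-type flags
+ * (bit 0 parity, bit 1 single-correctable, bit 2 multi-uncorrectable,
+ * bit 3 poison), mirroring the legacy amdgpu GFX RAS sub-block tables.
+ * gfx_v9_0_get_ta_subblock() below tests the hw_ and sw_supported
+ * fields against the caller's error_type bitmask.
+ */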
+
+const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = {
+ RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
+};
+
+static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ const struct ras_gfx_subblock_t *gfx_subblock;
+
+ if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks))
+ return -EINVAL;
+
+ gfx_subblock = &ras_gfx_v9_0_subblocks[subblock];
+ if (!gfx_subblock->name)
+ return -EPERM;
+
+ if (!(gfx_subblock->hw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware do not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ if (!(gfx_subblock->sw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver do not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ *ta_subblock = gfx_subblock->ta_subblock;
+
+ return 0;
+}
+
+const struct ras_gfx_ip_func gfx_ras_func_v9_0 = {
+ .get_ta_subblock = gfx_v9_0_get_ta_subblock,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
new file mode 100644
index 000000000000..659b56619747
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_V9_0_H__
+#define __RAS_GFX_V9_0_H__
+
+enum ras_gfx_v9_subblock {
+ /* CPC */
+ RAS_GFX_V9__GFX_CPC_INDEX_START = 0,
+ RAS_GFX_V9__GFX_CPC_SCRATCH =
+ RAS_GFX_V9__GFX_CPC_INDEX_START,
+ RAS_GFX_V9__GFX_CPC_UCODE,
+ RAS_GFX_V9__GFX_DC_STATE_ME1,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME1,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME1,
+ RAS_GFX_V9__GFX_DC_STATE_ME2,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME2,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ RAS_GFX_V9__GFX_CPC_INDEX_END =
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF */
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME2 =
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME1,
+ RAS_GFX_V9__GFX_CPF_TAG,
+ RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG,
+ /* CPG */
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_ROQ =
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_TAG,
+ RAS_GFX_V9__GFX_CPG_TAG,
+ RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG,
+ /* GDS */
+ RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ RAS_GFX_V9__GFX_GDS_INDEX_END =
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI */
+ RAS_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ */
+ RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_LDS_D,
+ RAS_GFX_V9__GFX_SQ_LDS_I,
+ RAS_GFX_V9__GFX_SQ_VGPR,
+ RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges) */
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0 */
+ RAS_GFX_V9__GFX_SQC_INDEX0_START =
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ RAS_GFX_V9__GFX_SQC_INDEX0_START,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_INDEX0_END =
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1 */
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX1_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2 */
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX2_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX_END =
+ RAS_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA */
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_DFIFO =
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_AFIFO,
+ RAS_GFX_V9__GFX_TA_FL_LFIFO,
+ RAS_GFX_V9__GFX_TA_FX_LFIFO,
+ RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA */
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_HOLE_FIFO =
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ RAS_GFX_V9__GFX_TCA_INDEX_END =
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges) */
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0 */
+ RAS_GFX_V9__GFX_TCC_INDEX0_START =
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX0_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_INDEX0_END =
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1 */
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_DEC =
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ RAS_GFX_V9__GFX_TCC_INDEX1_END =
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2 */
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ RAS_GFX_V9__GFX_TCC_WRITE_RETURN,
+ RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ RAS_GFX_V9__GFX_TCC_INDEX2_END =
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3 */
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO =
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_INDEX3_END =
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4 */
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX4_END =
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX_END =
+ RAS_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI */
+ RAS_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP */
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_CACHE_RAM =
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_LFIFO_RAM,
+ RAS_GFX_V9__GFX_TCP_CMD_FIFO,
+ RAS_GFX_V9__GFX_TCP_VM_FIFO,
+ RAS_GFX_V9__GFX_TCP_DB_RAM,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ RAS_GFX_V9__GFX_TCP_INDEX_END =
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD */
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_LO =
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_HI,
+ RAS_GFX_V9__GFX_TD_CS_FIFO,
+ RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges) */
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0 */
+ RAS_GFX_V9__GFX_EA_INDEX0_START =
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM =
+ RAS_GFX_V9__GFX_EA_INDEX0_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_RRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_WRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_INDEX0_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1 */
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM =
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_IORD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_INDEX1_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2 */
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D0MEM =
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D1MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D2MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX2_END =
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX_END =
+ RAS_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank */
+ RAS_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker */
+ RAS_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ RAS_GFX_V9__GFX_MAX
+};
+
+extern const struct ras_gfx_ip_func gfx_ras_func_v9_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
new file mode 100644
index 000000000000..0a838fdcb2f6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+
+#define RAS_LOG_MAX_QUERY_SIZE 0xC000
+#define RAS_LOG_MEM_TEMP_SIZE 0x200
+#define RAS_LOG_MEMPOOL_SIZE \
+ (RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE)
+
+#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn))
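+
+/* For example, batch_idx = 3 with sn = 5 maps to tree index
+ * (3 << 8) | 5 = 0x305; each batch thus owns up to 256 consecutive
+ * tree indices, one per sub-record (sub_seqno is bounded by
+ * MAX_RECORD_PER_BATCH).
+ */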
+
+static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = {
+ [ACA_REG_IDX__CTL] = 0x1,
+ [ACA_REG_IDX__STATUS] = 0xB000000000000137,
+ [ACA_REG_IDX__ADDR] = 0x0,
+ [ACA_REG_IDX__MISC0] = 0x0,
+ [ACA_REG_IDX__CONFG] = 0x1ff00000002,
+ [ACA_REG_IDX__IPID] = 0x9600000000,
+ [ACA_REG_IDX__SYND] = 0x0,
+};
+
+static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t count = 0;
+
+ if (log_ring->logged_ecc_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count should not less than 0!\n");
+ count = 0;
+ } else {
+ count = log_ring->logged_ecc_count;
+ }
+
+ if (count > RAS_LOG_MEMPOOL_SIZE)
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count is out of range!\n");
+
+ return count;
+}
+
+static int ras_log_ring_add_data(struct ras_core_context *ras_core,
+ struct ras_log_info *log, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Invalid batch sub seqno:%d, batch:0x%llx\n",
+ batch_tag->sub_seqno, batch_tag->batch_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ if (batch_tag) {
+ log->seqno =
+ BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno);
+ batch_tag->sub_seqno++;
+ } else {
+ log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0);
+ log_ring->mono_upward_batch_id++;
+ }
+ ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log);
+ if (!ret)
+ log_ring->logged_ecc_count++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to add ras log! seqno:0x%llx, ret:%d\n",
+ log->seqno, ret);
+ mempool_free(log, log_ring->ras_log_mempool);
+ }
+
+ return ret;
+}
+
+static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ uint32_t i = 0, j = 0;
+ uint64_t batch_id, idx;
+ void *data;
+ int ret = -ENODATA;
+
+ if (count > ras_log_ring_get_logged_ecc_count(ras_core))
+ return -EINVAL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ i++;
+ }
+ }
+ batch_id = ++log_ring->last_del_batch_id;
+ if (i >= count) {
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return ret;
+}
+
+static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t batch_id, idx;
+ unsigned long flags = 0;
+ void *data;
+ int j;
+
+ if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) &&
+ !log_ring->logged_ecc_count)
+ return;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ }
+ }
+ batch_id++;
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+}
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ memset(log_ring, 0, sizeof(*log_ring));
+
+ log_ring->ras_log_mempool = mempool_create_kmalloc_pool(
+ RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info));
+ if (!log_ring->ras_log_mempool)
+ return -ENOMEM;
+
+ INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL);
+
+ spin_lock_init(&log_ring->spin_lock);
+
+ return 0;
+}
+
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ ras_log_ring_clear_log_tree(ras_core);
+ log_ring->logged_ecc_count = 0;
+ log_ring->last_del_batch_id = 0;
+ log_ring->mono_upward_batch_id = 0;
+
+ mempool_destroy(log_ring->ras_log_mempool);
+
+ return 0;
+}
+
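+/*
+ * Reserve a batch id so that a group of related records shares one
+ * timestamp and a contiguous sub-sequence range.
+ */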
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct ras_log_batch_tag *batch_tag;
+ unsigned long flags = 0;
+
+ batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL);
+ if (!batch_tag)
+ return NULL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_tag->batch_id = log_ring->mono_upward_batch_id;
+ log_ring->mono_upward_batch_id++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ batch_tag->sub_seqno = 0;
+ batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ return batch_tag;
+}
+
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *batch_tag)
+{
+ kfree(batch_tag);
+}
+
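+/*
+ * Log one RAS event. The record is taken from the preallocated mempool;
+ * if the pool is exhausted, or the tree has grown past the query window,
+ * the oldest records are evicted first. RMA events are synthesized from a
+ * fixed ACA register template with the socket id folded into the IPID
+ * register.
+ */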
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct device_system_info dev_info = {0};
+ struct ras_log_info *log;
+ uint64_t socket_id;
+ void *obj;
+
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ if (!obj ||
+ (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) {
+ ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE);
+ if (!obj)
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ }
+
+ if (!obj) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to allocate RAS log buffer!\n");
+ return;
+ }
+
+ log = (struct ras_log_info *)obj;
+
+ memset(log, 0, sizeof(*log));
+ log->timestamp =
+ batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core);
+ log->event = event;
+
+ if (data)
+ memcpy(&log->aca_reg, data, sizeof(log->aca_reg));
+
+ if (event == RAS_LOG_EVENT_RMA) {
+ memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg));
+ ras_core_get_device_system_info(ras_core, &dev_info);
+ socket_id = dev_info.socket_id;
+ log->aca_reg.regs[ACA_REG_IDX__IPID] |= ((socket_id / 4) & 0x01);
+ log->aca_reg.regs[ACA_REG_IDX__IPID] |= (((socket_id % 4) & 0x3) << 44);
+ }
+
+ ras_log_ring_add_data(ras_core, log, batch_tag);
+}
+
+static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core,
+ uint64_t idx)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ void *data;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ data = radix_tree_lookup(&log_ring->ras_log_root, idx);
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return (struct ras_log_info *)data;
+}
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id,
+ struct ras_log_info **log_arr, uint32_t arr_num)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint32_t i, count = 0;
+ uint64_t idx;
+ void *data;
+
+ if ((batch_id >= log_ring->mono_upward_batch_id) ||
+ (batch_id < log_ring->last_del_batch_id))
+ return -EINVAL;
+
+ for (i = 0; i < MAX_RECORD_PER_BATCH; i++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, i);
+ data = ras_log_ring_lookup_data(ras_core, idx);
+ if (data) {
+ log_arr[count++] = data;
+ if (count >= arr_num)
+ break;
+ }
+ }
+
+ return count;
+}
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ overview->logged_batch_count =
+ log_ring->mono_upward_batch_id - log_ring->last_del_batch_id;
+ overview->last_batch_id = log_ring->mono_upward_batch_id;
+ overview->first_batch_id = log_ring->last_del_batch_id;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
new file mode 100644
index 000000000000..0ff6cc35678d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_LOG_RING_H__
+#define __RAS_LOG_RING_H__
+#include "ras_aca.h"
+
+#define MAX_RECORD_PER_BATCH 32
+
+#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8)
+
+enum ras_log_event {
+ RAS_LOG_EVENT_NONE,
+ RAS_LOG_EVENT_UE,
+ RAS_LOG_EVENT_DE,
+ RAS_LOG_EVENT_CE,
+ RAS_LOG_EVENT_POISON_CREATION,
+ RAS_LOG_EVENT_POISON_CONSUMPTION,
+ RAS_LOG_EVENT_RMA,
+ RAS_LOG_EVENT_COUNT_MAX,
+};
+
+struct ras_aca_reg {
+ uint64_t regs[ACA_REG_MAX_COUNT];
+};
+
+struct ras_log_info {
+ uint64_t seqno;
+ uint64_t timestamp;
+ enum ras_log_event event;
+ union {
+ struct ras_aca_reg aca_reg;
+ };
+};
+
+struct ras_log_batch_tag {
+ uint64_t batch_id;
+ uint64_t timestamp;
+ uint32_t sub_seqno;
+};
+
+struct ras_log_ring {
+ void *ras_log_mempool;
+ struct radix_tree_root ras_log_root;
+ spinlock_t spin_lock;
+ uint64_t mono_upward_batch_id;
+ uint64_t last_del_batch_id;
+ int logged_ecc_count;
+};
+
+struct ras_log_batch_overview {
+ uint64_t first_batch_id;
+ uint64_t last_batch_id;
+ uint32_t logged_batch_count;
+};
+
+struct ras_core_context;
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core);
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core);
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core);
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *tag);
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *tag);
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx,
+ struct ras_log_info **log_arr, uint32_t arr_num);
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
new file mode 100644
index 000000000000..f3321df85021
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_mp1_v13_0.h"
+
+static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &mp1_ras_func_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "MP1 IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->get_valid_bank_count(ras_core, type, count);
+}
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val);
+}
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ mp1->mp1_ip_version = ras_core->config->mp1_ip_version;
+ mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn;
+ if (!mp1->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version);
+
+ return mp1->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_mp1_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
new file mode 100644
index 000000000000..de1d08286f41
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_H__
+#define __RAS_MP1_H__
+#include "ras.h"
+
+enum ras_err_type;
+struct ras_mp1_ip_func {
+ int (*get_valid_bank_count)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+ int (*dump_valid_bank)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_mp1 {
+ uint32_t mp1_ip_version;
+ const struct ras_mp1_ip_func *ip_func;
+ const struct ras_mp1_sys_func *sys_func;
+};
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core);
+int ras_mp1_hw_fini(struct ras_core_context *ras_core);
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 ecc_type, u32 idx, u32 reg_idx, u64 *val);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
new file mode 100644
index 000000000000..310d39fc816b
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_core_status.h"
+#include "ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCount 0x36
+#define RAS_MP1_MSG_McaBankDumpDW 0x37
+#define RAS_MP1_MSG_ClearMcaOnRead 0x39
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+#define MAX_UE_BANKS_PER_QUERY 12
+#define MAX_CE_BANKS_PER_QUERY 12
+
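+/*
+ * Query the number of valid MCA banks through the MP1 message interface;
+ * UE and CE/DE counts use separate messages. A count at or above the
+ * per-query bank limit is treated as invalid.
+ */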
+static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ uint32_t bank_count = 0;
+ u32 msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!sys_func || !sys_func->mp1_get_valid_bank_count)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_QueryValidMcaCount;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count);
+ if (!ret) {
+ if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) ||
+ ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY)))
+ return -EINVAL;
+
+ *count = bank_count;
+ }
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ u32 msg;
+
+ if (!sys_func || !sys_func->mp1_dump_valid_bank)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_McaBankDumpDW;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val);
+}
+
+const struct ras_mp1_ip_func mp1_ras_func_v13_0 = {
+ .get_valid_bank_count = mp1_v13_0_get_bank_count,
+ .dump_valid_bank = mp1_v13_0_dump_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
new file mode 100644
index 000000000000..2edfdb5f6a75
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_V13_0_H__
+#define __RAS_MP1_V13_0_H__
+#include "ras_mp1.h"
+
+extern const struct ras_mp1_ip_func mp1_ras_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
new file mode 100644
index 000000000000..bfddd104d548
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio.h"
+#include "ras_nbio_v7_9.h"
+
+static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ return &ras_nbio_v7_9;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "NBIO IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
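+/*
+ * Bind the system- and IP-level NBIO callbacks, then enable the RAS
+ * controller and err_event_athub doorbell interrupts.
+ */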
+int ras_nbio_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ nbio->nbio_ip_version = ras_core->config->nbio_ip_version;
+ nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn;
+ if (!nbio->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version);
+ if (!nbio->ip_func)
+ return -EINVAL;
+
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, true);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true);
+
+ return 0;
+}
+
+int ras_nbio_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, false);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false);
+ }
+
+ return 0;
+}
+
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->ip_func) {
+ if (nbio->ip_func->handle_ras_controller_intr_no_bifring)
+ nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core);
+ if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring)
+ nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core);
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
new file mode 100644
index 000000000000..0a1313e59a02
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_H__
+#define __RAS_NBIO_H__
+#include "ras.h"
+
+struct ras_core_context;
+
+struct ras_nbio_ip_func {
+ int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core);
+ int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core);
+ uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio {
+ uint32_t nbio_ip_version;
+ const struct ras_nbio_ip_func *ip_func;
+ const struct ras_nbio_sys_func *sys_func;
+};
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core);
+int ras_nbio_hw_fini(struct ras_core_context *ras_core);
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
new file mode 100644
index 000000000000..f17d708ec668
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio_v7_9.h"
+
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe
+
+#define regBIF_BX0_BIF_INTR_CNTL 0x0101
+#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2
+
+/* BIF_BX0_BIF_INTR_CNTL */
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2
+/* BIF_BX_PF0_PARTITION_MEM_STATUS */
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L
+
+static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /* TODO: handle ras controller interrupt */
+ }
+
+ return 0;
+}
+
+static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+ int ret = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ ret = ras_core_handle_fatal_error(ras_core);
+ }
+
+ return ret;
+}
+
+static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core)
+{
+ uint32_t mem_status;
+ uint32_t mem_mode;
+
+ mem_status =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+
+ /* Each bit represents a mode 1-8*/
+ mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
+
+ return ffs(mem_mode);
+}
+
+const struct ras_nbio_ip_func ras_nbio_v7_9 = {
+ .handle_ras_controller_intr_no_bifring =
+ nbio_v7_9_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
+ .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
new file mode 100644
index 000000000000..8711c82a927f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_V7_9_H__
+#define __RAS_NBIO_V7_9_H__
+#include "ras_nbio.h"
+
+extern const struct ras_nbio_ip_func ras_nbio_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
new file mode 100644
index 000000000000..3267dcdb169c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_process.h"
+
+#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req))
+
+#define RAS_POLLING_ECC_TIMEOUT 300
+
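+/* Queue one event request; fails with -ENOSPC when the fifo is full. */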
+static int ras_process_put_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_in_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+ if (!ret) {
+ RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core,
+ uint32_t reset_cause)
+{
+ struct ras_event_req req = {0};
+
+ req.reset = reset_cause;
+
+ return ras_process_put_event(ras_core, &req);
+}
+
+static int ras_process_get_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ return kfifo_out_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+}
+
+static void ras_process_clear_event_fifo(struct ras_core_context *ras_core)
+{
+ struct ras_event_req req;
+ int ret;
+
+ do {
+ ret = ras_process_get_event(ras_core, &req);
+ } while (ret);
+}
+
+#define AMDGPU_RAS_WAITING_DATA_READY 200
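+/*
+ * Poll the UMC ECC data until every interrupt-reported deferred error has
+ * been observed, or until no new data arrives within the waiting window.
+ * If deferred errors were found and the device has reached the RMA state,
+ * queue a GPU reset request.
+ */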
+static int ras_process_umc_event(struct ras_core_context *ras_core,
+ uint32_t event_count)
+{
+ struct ras_ecc_count ecc_data;
+ int ret = 0;
+ uint32_t timeout = 0;
+ uint32_t detected_de_count = 0;
+
+ do {
+ memset(&ecc_data, 0, sizeof(ecc_data));
+ ret = ras_core_update_ecc_info(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data);
+ if (ret)
+ return ret;
+
+ if (ecc_data.new_de_count) {
+ detected_de_count += ecc_data.new_de_count;
+ timeout = 0;
+ } else {
+ if (!timeout && event_count)
+ timeout = AMDGPU_RAS_WAITING_DATA_READY;
+
+ if (timeout) {
+ if (!--timeout)
+ break;
+
+ msleep(1);
+ }
+ }
+ } while (detected_de_count < event_count);
+
+ if (detected_de_count && ras_core_gpu_is_rma(ras_core))
+ ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA);
+
+ return 0;
+}
+
+static int ras_process_non_umc_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ struct ras_event_req req;
+ uint32_t event_count = kfifo_len(&ras_proc->event_fifo) / sizeof(req);
+ uint32_t reset_flags = 0;
+ int ret = 0, i;
+
+ for (i = 0; i < event_count; i++) {
+ memset(&req, 0, sizeof(req));
+ ret = ras_process_get_event(ras_core, &req);
+ if (!ret)
+ continue;
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__POISON_CONSUMPTION, &req);
+
+ reset_flags |= req.reset;
+
+ if (req.reset == GPU_RESET_CAUSE_RMA)
+ continue;
+
+ if (req.reset)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} GPU reset for %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ else
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ }
+
+ if (reset_flags) {
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESET_GPU, &reset_flags);
+ if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA))
+ return -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+int ras_process_handle_ras_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ uint32_t umc_event_count;
+ int ret;
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, NULL);
+ if (ret)
+ return ret;
+
+ ras_aca_clear_fatal_flag(ras_core);
+ ras_umc_log_pending_bad_bank(ras_core);
+
+ do {
+ umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+ ret = ras_process_umc_event(ras_core, umc_event_count);
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+ break;
+
+ if (umc_event_count)
+ atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+ } while (atomic_read(&ras_proc->umc_interrupt_count));
+
+ if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+ (kfifo_len(&ras_proc->event_fifo)))
+ ret = ras_process_non_umc_event(ras_core);
+
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+ /* Clear poison fifo */
+ ras_process_clear_event_fifo(ras_core);
+ atomic_set(&ras_proc->umc_interrupt_count, 0);
+ }
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RAS_EVENT_PROC_END, NULL);
+ return ret;
+}
+
+static int thread_wait_condition(void *param)
+{
+ struct ras_process *ras_proc = (struct ras_process *)param;
+
+ return (kthread_should_stop() ||
+ atomic_read(&ras_proc->ras_interrupt_req));
+}
+
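+/*
+ * Worker thread: wakes on interrupt requests, or every
+ * RAS_POLLING_ECC_TIMEOUT ms, and dispatches RAS event handling,
+ * deferring to the system-provided async handler when one is configured.
+ */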
+static int ras_process_thread(void *context)
+{
+ struct ras_core_context *ras_core = (struct ras_core_context *)context;
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ while (!kthread_should_stop()) {
+ ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+ thread_wait_condition, ras_proc,
+ msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+ if (kthread_should_stop())
+ break;
+
+ if (!ras_core->is_initialized)
+ continue;
+
+ atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+ if (ras_core_gpu_in_reset(ras_core))
+ continue;
+
+ if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+ ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+ else
+ ras_process_handle_ras_event(ras_core);
+ }
+
+ return 0;
+}
+
+int ras_process_init(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&ras_proc->fifo_spinlock);
+
+ init_waitqueue_head(&ras_proc->ras_process_wq);
+
+ ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+ (void *)ras_core, "ras_process_thread");
+ if (IS_ERR(ras_proc->ras_process_thread)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+ ret = PTR_ERR(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ras_process_fini(ras_core);
+ return ret;
+}
+
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ if (ras_proc->ras_process_thread) {
+ kthread_stop(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ }
+
+ kfifo_free(&ras_proc->event_fifo);
+
+ return 0;
+}
+
+static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ atomic_inc(&ras_proc->umc_interrupt_count);
+ atomic_inc(&ras_proc->ras_interrupt_req);
+
+ wake_up(&ras_proc->ras_process_wq);
+ return 0;
+}
+
+static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = ras_process_put_event(ras_core, req);
+ if (!ret) {
+ atomic_inc(&ras_proc->ras_interrupt_req);
+ wake_up(&ras_proc->ras_process_wq);
+ }
+
+ return ret;
+}
+
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc)
+{
+ int ret;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ if (!ras_core->is_initialized)
+ return -EPERM;
+
+ if (is_umc)
+ ret = ras_process_add_umc_interrupt_req(ras_core, req);
+ else
+ ret = ras_process_add_non_umc_interrupt_req(ras_core, req);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
new file mode 100644
index 000000000000..28458b50510e
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PROCESS_H__
+#define __RAS_PROCESS_H__
+
+struct ras_event_req {
+ uint64_t seqno;
+ uint32_t idx_vf;
+ uint32_t block;
+ uint16_t pasid;
+ uint32_t reset;
+ void *pasid_fn;
+ void *data;
+};
+
+struct ras_process {
+ void *dev;
+ void *ras_process_thread;
+ wait_queue_head_t ras_process_wq;
+ atomic_t ras_interrupt_req;
+ atomic_t umc_interrupt_count;
+ struct kfifo event_fifo;
+ spinlock_t fifo_spinlock;
+};
+
+struct ras_core_context;
+int ras_process_init(struct ras_core_context *ras_core);
+int ras_process_fini(struct ras_core_context *ras_core);
+int ras_process_handle_ras_event(struct ras_core_context *ras_core);
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
new file mode 100644
index 000000000000..ccdb42d2dd60
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_ta_if.h"
+#include "ras_psp.h"
+#include "ras_psp_v13_0.h"
+
+/* position of instance value in sub_block_index of
+ * ta_ras_trigger_error_input, the sub block uses lower 12 bits
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+static const struct ras_psp_ip_func *ras_psp_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &ras_psp_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "PSP IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct ras_psp_sys_status status = {0};
+ int ret;
+
+ if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) {
+ ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status);
+ if (ret)
+ return ret;
+
+ if (status.initialized) {
+ ta_ctx->preload_ras_ta_enabled = true;
+ ta_ctx->ras_ta_initialized = status.initialized;
+ ta_ctx->session_id = status.session_id;
+ }
+
+ psp_ctx->external_mutex = status.psp_cmd_mutex;
+ }
+
+ return 0;
+}
+
+static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ if (psp->sys_func && psp->sys_func->get_ras_ta_init_param)
+ return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param);
+
+ RAS_DEV_ERR(ras_core->dev, "get_ras_ta_init_param API is not configured!\n");
+ return -EACCES;
+}
+
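+/*
+ * Look up (and lazily acquire) the shared GPU memory block backing a PSP
+ * resource, taking a reference on it; the block is handed back to the
+ * core once the last reference is dropped.
+ */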
+static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct gpu_mem_block *gpu_mem = NULL;
+ int ret;
+
+ switch (mem_type) {
+ case GPU_MEM_TYPE_RAS_PSP_RING:
+ gpu_mem = &psp->psp_ring.ras_ring_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_CMD:
+ gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_FENCE:
+ gpu_mem = &psp->psp_ctx.out_fence_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_FW:
+ gpu_mem = &psp->ta_ctx.fw_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_CMD:
+ gpu_mem = &psp->ta_ctx.cmd_gpu_mem;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!gpu_mem->ref_count) {
+ ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem);
+ if (ret)
+ return NULL;
+ gpu_mem->mem_type = mem_type;
+ }
+
+ gpu_mem->ref_count++;
+
+ return gpu_mem;
+}
+
+static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core,
+ struct gpu_mem_block *gpu_mem)
+{
+ if (!gpu_mem)
+ return 0;
+
+ gpu_mem->ref_count--;
+
+ if (gpu_mem->ref_count > 0) {
+ return 0;
+ } else if (gpu_mem->ref_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Duplicate free gpu memory %u\n", gpu_mem->mem_type);
+ } else {
+ ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem);
+ memset(gpu_mem, 0, sizeof(*gpu_mem));
+ }
+
+ return 0;
+}
+
+static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_lock(psp_ctx->external_mutex);
+ else
+ mutex_lock(&psp_ctx->internal_mutex);
+}
+
+static void __release_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_unlock(psp_ctx->external_mutex);
+ else
+ mutex_unlock(&psp_ctx->internal_mutex);
+}
+
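+/*
+ * The PSP ring write pointer is kept in dwords; these helpers convert it
+ * to and from frame-slot indices.
+ */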
+static uint32_t __get_ring_frame_slot(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ uint32_t ras_ring_wptr_dw;
+
+ ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core);
+
+ return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame));
+}
+
+static int __set_ring_frame_slot(struct ras_core_context *ras_core,
+ uint32_t slot)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ return psp->ip_func->psp_ras_ring_wptr_set(ras_core,
+ (slot * sizeof(struct psp_gfx_rb_frame)) >> 2);
+}
+
+static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core,
+ struct psp_gfx_rb_frame *frame)
+{
+ struct gpu_mem_block *ring_mem;
+ struct psp_gfx_rb_frame *rb_frame;
+ uint32_t max_frame_slot;
+ uint32_t slot_idx;
+ uint32_t write_flush_read_back = 0;
+ int ret = 0;
+
+ ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING);
+ if (!ring_mem)
+ return -ENOMEM;
+
+ max_frame_slot =
+ div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame));
+
+ rb_frame =
+ (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr;
+
+ slot_idx = __get_ring_frame_slot(ras_core);
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ memcpy(&rb_frame[slot_idx], frame, sizeof(*frame));
+
+ /* Read the frame back to force the write to complete before the
+ * write pointer is updated.
+ */
+ write_flush_read_back = rb_frame[slot_idx].fence_value;
+ if (write_flush_read_back != frame->fence_value) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to submit ring cmd! cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n",
+ rb_frame[slot_idx].cmd_buf_addr_hi,
+ rb_frame[slot_idx].cmd_buf_addr_lo,
+ rb_frame[slot_idx].fence_addr_hi,
+ rb_frame[slot_idx].fence_addr_lo,
+ write_flush_read_back, frame->fence_value);
+ ret = -EACCES;
+ goto err;
+ }
+
+ slot_idx++;
+
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ __set_ring_frame_slot(ras_core, slot_idx);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, ring_mem);
+ return ret;
+}
+
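+/*
+ * Submit one GFX command to the PSP: stage the command in shared GPU
+ * memory, push a ring frame referencing it, then poll the fence buffer
+ * for completion. The wait is abandoned early when a RAS interrupt is
+ * detected so the GPU reset path can reclaim the locks.
+ */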
+static int send_psp_cmd(struct ras_core_context *ras_core,
+ enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data,
+ uint32_t cmd_size, struct psp_cmd_resp *resp)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct gpu_mem_block *psp_cmd_buf = NULL;
+ struct gpu_mem_block *psp_fence_buf = NULL;
+ struct psp_gfx_cmd_resp *gfx_cmd;
+ struct psp_gfx_rb_frame rb_frame;
+ int ret = 0;
+ int timeout = 1000;
+
+ if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id);
+ return -EINVAL;
+ }
+
+ __acquire_psp_cmd_lock(ras_core);
+
+ psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD);
+ if (!psp_cmd_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE);
+ if (!psp_fence_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr;
+ memset(gfx_cmd, 0, sizeof(*gfx_cmd));
+ gfx_cmd->cmd_id = gfx_cmd_id;
+ memcpy(&gfx_cmd->cmd, cmd_data, cmd_size);
+
+ psp_ctx->in_fence_value++;
+
+ memset(&rb_frame, 0, sizeof(rb_frame));
+ rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_value = psp_ctx->in_fence_value;
+
+ ret = write_frame_to_ras_psp_ring(ras_core, &rb_frame);
+ if (ret) {
+ psp_ctx->in_fence_value--;
+ goto exit;
+ }
+
+ while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) !=
+ psp_ctx->in_fence_value) {
+ if (--timeout == 0)
+ break;
+ /*
+ * Don't wait out the timeout when err_event_athub occurs: the GPU
+ * reset thread has been triggered, and the lock resources must be
+ * released for the PSP resume sequence.
+ */
+ if (ras_core_ras_interrupt_detected(ras_core))
+ break;
+
+ msleep(2);
+ }
+
+ resp->status = gfx_cmd->resp.status;
+ resp->session_id = gfx_cmd->resp.session_id;
+
+exit:
+ ras_psp_put_gpu_mem(ras_core, psp_cmd_buf);
+ ras_psp_put_gpu_mem(ras_core, psp_fence_buf);
+
+ __release_psp_cmd_lock(ras_core);
+
+ return ret;
+}
+
+static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core,
+ struct ras_ta_cmd *ras_cmd)
+{
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n");
+ ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) {
+ RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n");
+ }
+
+ switch (ras_cmd->ras_status) {
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case RAS_TA_STATUS__SUCCESS:
+ break;
+ case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED:
+ if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: Inject error to critical region is not allowed\n");
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
+}
+
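+/*
+ * Invoke a runtime command on the loaded RAS TA session and copy the
+ * TA's response back to the caller on success.
+ */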
+static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size,
+ void *out, uint32_t out_size)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ras_cmd;
+ struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret = 0;
+
+ if (!in || (in_size > sizeof(union ras_ta_cmd_input)) ||
+ (cmd_id >= MAX_RAS_TA_CMD_ID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id);
+ return -EINVAL;
+ }
+
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem)
+ return -ENOMEM;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+
+ mutex_lock(&ta_ctx->ta_mutex);
+
+ memset(ras_cmd, 0, sizeof(*ras_cmd));
+ ras_cmd->cmd_id = cmd_id;
+ memcpy(&ras_cmd->ras_in_message, in, in_size);
+
+ invoke_cmd.ta_cmd_id = cmd_id;
+ invoke_cmd.session_id = ta_ctx->session_id;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD,
+ &invoke_cmd, sizeof(invoke_cmd), &resp);
+
+ /* If err_event_athub occurs, the error injection was successful;
+ * however, the return status from the TA is no longer reliable.
+ */
+ if (ras_core_ras_interrupt_detected(ras_core)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS: Failed to send psp cmd! ret:%d, status:%u\n",
+ ret, resp.status);
+ ret = -ESTRPIPE;
+ goto unlock;
+ }
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!ras_cmd->ras_status && out && out_size)
+ memcpy(out, &ras_cmd->ras_out_message, out_size);
+
+ __check_ras_ta_cmd_resp(ras_core, ras_cmd);
+
+unlock:
+ mutex_unlock(&ta_ctx->ta_mutex);
+ ras_core_up_gpu_reset_lock(ras_core);
+out:
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
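+/*
+ * Encode the target instance mask into the upper bits of
+ * sub_block_index (reused there for backward compatibility) and send the
+ * TRIGGER_ERROR command to the RAS TA.
+ */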
+static int trigger_ras_ta_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ uint32_t dev_mask = 0;
+
+ switch (info->block_id) {
+ case RAS_TA_BLOCK__GFX:
+ if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type,
+ info->sub_block_index, &info->sub_block_index))
+ return -EINVAL;
+
+ dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask);
+ break;
+ case RAS_TA_BLOCK__SDMA:
+ dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask);
+ break;
+ case RAS_TA_BLOCK__VCN:
+ case RAS_TA_BLOCK__JPEG:
+ dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask);
+ break;
+ default:
+ dev_mask = instance_mask;
+ break;
+ }
+
+ /* reuse sub_block_index for backward compatibility */
+ dev_mask <<= RAS_TA_INST_SHIFT;
+ dev_mask &= RAS_TA_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR,
+ info, sizeof(*info), NULL, 0);
+}
+
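+/*
+ * Copy the RAS TA binary into shared GPU memory, fill in the TA init
+ * flags, and issue GFX_CMD_ID_LOAD_TA. On success the TA version and
+ * session id are recorded in the TA context.
+ */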
+static int send_load_ta_fw_cmd(struct ras_core_context *ras_core,
+ struct ras_ta_ctx *ta_ctx)
+{
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ struct gpu_mem_block *fw_mem;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ta_cmd;
+ struct ras_ta_init_flags *ta_init_flags;
+ struct psp_gfx_cmd_load_ta psp_load_ta_cmd;
+ struct psp_cmd_resp resp = {0};
+ struct ras_ta_image_header *fw_hdr = NULL;
+ int ret;
+
+ fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW);
+ if (!fw_mem)
+ return -ENOMEM;
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param);
+ if (ret)
+ goto err;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ /* copy ras ta binary to shared gpu memory */
+ memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size);
+ fw_mem->mem_size = fw_bin->bin_size;
+
+ /* Initialize ras ta startup parameter */
+ ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+ ta_init_flags = &ta_cmd->ras_in_message.init_flags;
+
+ ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en;
+ ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode;
+ ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask;
+ ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num;
+ ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode;
+ ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask;
+
+ /* Setup load ras ta command */
+ memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd));
+ psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_len = fw_mem->mem_size;
+ psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA,
+ &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp);
+ if (!ret && !resp.status) {
+ /* Read the TA version from the FW image header at offset 0x60 */
+ fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr;
+ RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n",
+ (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF,
+ (fw_hdr->image_version >> 8) & 0xFF, fw_hdr->image_version & 0xFF);
+ ta_ctx->ta_version = fw_hdr->image_version;
+ ta_ctx->session_id = resp.session_id;
+ ta_ctx->ras_ta_initialized = true;
+ } else {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status);
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, fw_mem);
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
+static int load_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ int ret;
+
+ fw_bin->bin_addr = ras_ta_load->bin_addr;
+ fw_bin->bin_size = ras_ta_load->bin_size;
+ fw_bin->fw_version = ras_ta_load->fw_version;
+ fw_bin->feature_version = ras_ta_load->feature_version;
+
+ ret = send_load_ta_fw_cmd(ras_core, ta_ctx);
+ if (!ret) {
+ ras_ta_load->out_session_id = ta_ctx->session_id;
+ ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version;
+ }
+
+ return ret;
+}
+
+static int unload_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core))
+ return -EACCES;
+
+ cmd_unload_ta.session_id = ta_ctx->session_id;
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA,
+ &cmd_unload_ta, sizeof(cmd_unload_ta), &resp);
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to unload RAS TA! ret:%d, status:%u\n",
+ ret, resp.status);
+ goto unlock;
+ }
+
+ kfree(ta_ctx->fw_bin.bin_addr);
+ memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin));
+ ta_ctx->ta_version = 0;
+ ta_ctx->ras_ta_initialized = false;
+ ta_ctx->session_id = 0;
+
+unlock:
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return ret;
+}
+
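+/* If a RAS TA session is already active, unload it before loading the
+ * new firmware image; a preloaded RAS TA is left untouched.
+ */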
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ta_unload ras_ta_unload = {0};
+ int ret;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if (!ras_ta_load)
+ return -EINVAL;
+
+ if (ta_ctx->ras_ta_initialized) {
+ ras_ta_unload.ras_session_id = ta_ctx->session_id;
+ ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload);
+ if (ret)
+ return ret;
+ }
+
+ return load_ras_ta_firmware(ras_core, ras_ta_load);
+}
+
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if (!ras_ta_unload ||
+     ras_ta_unload->ras_session_id != ta_ctx->session_id)
+ return -EINVAL;
+
+ return unload_ras_ta_firmware(ras_core, ras_ta_unload);
+}
+
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) {
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!");
+ return -ENOEXEC;
+ }
+
+ if (!info)
+ return -EINVAL;
+
+ return trigger_ras_ta_error(ras_core, info, instance_mask);
+}
+
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled &&
+ !ta_ctx->ras_ta_initialized) {
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!");
+ return -ENOEXEC;
+ }
+
+ if (!addr_in || !addr_out)
+ return -EINVAL;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS,
+ addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out));
+}
+
+int ras_psp_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ memset(psp, 0, sizeof(*psp));
+
+ psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn;
+ if (!psp->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&psp->psp_ctx.internal_mutex);
+ mutex_init(&psp->ta_ctx.ta_mutex);
+
+ return 0;
+}
+
+int ras_psp_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ mutex_destroy(&psp->psp_ctx.internal_mutex);
+ mutex_destroy(&psp->ta_ctx.ta_mutex);
+
+ memset(psp, 0, sizeof(*psp));
+
+ return 0;
+}
+
+int ras_psp_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ psp->psp_ip_version = ras_core->config->psp_ip_version;
+
+ psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version);
+ if (!psp->ip_func)
+ return -EINVAL;
+
+ /* After a GPU reset, the system RAS PSP status may change;
+ * therefore, synchronize the system status again.
+ */
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ return 0;
+}
+
+int ras_psp_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ bool ret = false;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized)
+ return false;
+
+ switch (cmd_id) {
+ case RAS_TA_CMD_ID__QUERY_ADDRESS:
+ /* Querying an address from the RAS TA is currently only supported
+ * when the RAS TA firmware is preloaded during driver initialization.
+ */
+ if (ta_ctx->preload_ras_ta_enabled)
+ ret = true;
+ break;
+ case RAS_TA_CMD_ID__TRIGGER_ERROR:
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
new file mode 100644
index 000000000000..71776fecfd66
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PSP_H__
+#define __RAS_PSP_H__
+#include "ras.h"
+#include "ras_ta_if.h"
+
+struct ras_core_context;
+struct ras_ta_trigger_error_input;
+struct ras_ta_query_address_input;
+struct ras_ta_query_address_output;
+enum ras_ta_cmd_id;
+
+struct ras_ta_image_header {
+ uint32_t reserved1[24];
+ uint32_t image_version; /* [0x60] Off Chip Firmware Version */
+ uint32_t reserved2[39];
+};
+
+struct ras_psp_sys_status {
+ bool initialized;
+ uint32_t session_id;
+ void *psp_cmd_mutex;
+};
+
+struct ras_ta_init_param {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct gpu_mem_block {
+ uint32_t mem_type;
+ void *mem_bo;
+ uint64_t mem_mc_addr;
+ void *mem_cpu_addr;
+ uint32_t mem_size;
+ int ref_count;
+ void *private;
+};
+
+struct ras_psp_ip_func {
+ uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core);
+ int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr);
+};
+
+struct ras_psp_ring {
+ struct gpu_mem_block ras_ring_gpu_mem;
+};
+
+struct psp_cmd_resp {
+ uint32_t status;
+ uint32_t session_id;
+};
+
+struct ras_psp_ctx {
+ void *external_mutex;
+ struct mutex internal_mutex;
+ uint64_t in_fence_value;
+ struct gpu_mem_block psp_cmd_gpu_mem;
+ struct gpu_mem_block out_fence_gpu_mem;
+};
+
+struct ras_ta_fw_bin {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+};
+
+struct ras_ta_ctx {
+ bool preload_ras_ta_enabled;
+ bool ras_ta_initialized;
+ uint32_t session_id;
+ uint32_t resp_status;
+ uint32_t ta_version;
+ struct mutex ta_mutex;
+ struct ras_ta_fw_bin fw_bin;
+ struct ras_ta_init_param init_param;
+ struct gpu_mem_block fw_gpu_mem;
+ struct gpu_mem_block cmd_gpu_mem;
+};
+
+struct ras_psp {
+ uint32_t psp_ip_version;
+ struct ras_psp_ring psp_ring;
+ struct ras_psp_ctx psp_ctx;
+ struct ras_ta_ctx ta_ctx;
+ const struct ras_psp_ip_func *ip_func;
+ const struct ras_psp_sys_func *sys_func;
+};
+
+struct ras_psp_ta_load {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+ uint64_t out_session_id;
+ uint32_t out_loaded_ta_version;
+};
+
+struct ras_psp_ta_unload {
+ uint64_t ras_session_id;
+};
+
+int ras_psp_sw_init(struct ras_core_context *ras_core);
+int ras_psp_sw_fini(struct ras_core_context *ras_core);
+int ras_psp_hw_init(struct ras_core_context *ras_core);
+int ras_psp_hw_fini(struct ras_core_context *ras_core);
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load);
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload);
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask);
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out);
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
new file mode 100644
index 000000000000..626cf39b75ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_psp_v13_0.h"
+
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+
+static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core)
+{
+ return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67);
+}
+
+static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value)
+{
+ RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value);
+
+ return 0;
+}
+
+const struct ras_psp_ip_func ras_psp_v13_0 = {
+ .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get,
+ .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
new file mode 100644
index 000000000000..b705ffe38a12
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_PSP_V13_0_H__
+#define __RAS_PSP_V13_0_H__
+#include "ras_psp.h"
+
+extern const struct ras_psp_ip_func ras_psp_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
new file mode 100644
index 000000000000..0921e36d3274
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _RAS_TA_IF_H
+#define _RAS_TA_IF_H
+#include "ras.h"
+
+#define RAS_TA_HOST_IF_VER 0
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+/* invalid node instance value */
+#define RAS_TA_INV_NODE 0xffff
+
+/* RAS related enumerations */
+/**********************************************************/
+enum ras_ta_cmd_id {
+ RAS_TA_CMD_ID__ENABLE_FEATURES = 0,
+ RAS_TA_CMD_ID__DISABLE_FEATURES,
+ RAS_TA_CMD_ID__TRIGGER_ERROR,
+ RAS_TA_CMD_ID__QUERY_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_ADDRESS,
+ MAX_RAS_TA_CMD_ID
+};
+
+enum ras_ta_status {
+ RAS_TA_STATUS__SUCCESS = 0x0000,
+ RAS_TA_STATUS__RESET_NEEDED = 0xA001,
+ RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008,
+ RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0XA009,
+ RAS_TA_STATUS__ERROR_GENERIC = 0xA00A,
+ RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+ RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+ RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012,
+ RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014,
+ RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015,
+ RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
+ RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
+ RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019,
+ RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A
+};
+
+enum ras_ta_block {
+ RAS_TA_BLOCK__UMC = 0,
+ RAS_TA_BLOCK__SDMA,
+ RAS_TA_BLOCK__GFX,
+ RAS_TA_BLOCK__MMHUB,
+ RAS_TA_BLOCK__ATHUB,
+ RAS_TA_BLOCK__PCIE_BIF,
+ RAS_TA_BLOCK__HDP,
+ RAS_TA_BLOCK__XGMI_WAFL,
+ RAS_TA_BLOCK__DF,
+ RAS_TA_BLOCK__SMN,
+ RAS_TA_BLOCK__SEM,
+ RAS_TA_BLOCK__MP0,
+ RAS_TA_BLOCK__MP1,
+ RAS_TA_BLOCK__FUSE,
+ RAS_TA_BLOCK__MCA,
+ RAS_TA_BLOCK__VCN,
+ RAS_TA_BLOCK__JPEG,
+ RAS_TA_BLOCK__IH,
+ RAS_TA_BLOCK__MPIO,
+ RAS_TA_BLOCK__MMSCH,
+ RAS_TA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_mca_block {
+ RAS_TA_MCA_BLOCK__MP0 = 0,
+ RAS_TA_MCA_BLOCK__MP1 = 1,
+ RAS_TA_MCA_BLOCK__MPIO = 2,
+ RAS_TA_MCA_BLOCK__IOHC = 3,
+ RAS_TA_MCA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_error_type {
+ RAS_TA_ERROR__NONE = 0,
+ RAS_TA_ERROR__PARITY = 1,
+ RAS_TA_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TA_ERROR__POISON = 8,
+};
+
+enum ras_ta_address_type {
+ RAS_TA_MCA_TO_PA,
+ RAS_TA_PA_TO_MCA,
+};
+
+enum ras_ta_nps_mode {
+ RAS_TA_UNKNOWN_MODE = 0,
+ RAS_TA_NPS1_MODE = 1,
+ RAS_TA_NPS2_MODE = 2,
+ RAS_TA_NPS4_MODE = 4,
+ RAS_TA_NPS8_MODE = 8,
+};
+
+/* Input/output structures for RAS commands */
+/**********************************************************/
+
+struct ras_ta_enable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_disable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_trigger_error_input {
+ /* RAS block, e.g. umc, gfx */
+ enum ras_ta_block block_id;
+
+ /* type of error, e.g. single_correctable */
+ enum ras_ta_error_type inject_error_type;
+
+ /* memory block, e.g. hbm, sram */
+ uint32_t sub_block_index;
+
+ /* explicit address of the error */
+ uint64_t address;
+
+ /* method of error injection, e.g. persistent, coherent */
+ uint64_t value;
+};
+
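+/* Start-up flags passed to the RAS TA at load time. The layout mirrors
+ * the host-side struct ras_ta_init_param and, as part of the host/TA
+ * interface, is presumably fixed by the TA firmware.
+ */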
+struct ras_ta_init_flags {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct ras_ta_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct ras_ta_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct ras_ta_query_address_input {
+ enum ras_ta_address_type addr_type;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+struct ras_ta_output_flags {
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
+struct ras_ta_query_address_output {
+ /* don't use the flags here */
+ struct ras_ta_output_flags flags;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+/* Common input structure for RAS callbacks */
+/**********************************************************/
+union ras_ta_cmd_input {
+ struct ras_ta_init_flags init_flags;
+ struct ras_ta_enable_features_input enable_features;
+ struct ras_ta_disable_features_input disable_features;
+ struct ras_ta_trigger_error_input trigger_error;
+ struct ras_ta_query_address_input address;
+ uint32_t reserve_pad[256];
+};
+
+union ras_ta_cmd_output {
+ struct ras_ta_output_flags flags;
+ struct ras_ta_query_address_output address;
+ uint32_t reserve_pad[256];
+};
+
+struct ras_ta_cmd {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ras_ta_cmd_input ras_in_message;
+ union ras_ta_cmd_output ras_out_message;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
new file mode 100644
index 000000000000..4dae64c424a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_umc_v12_0.h"
+
+#define MAX_ECC_NUM_PER_RETIREMENT 16
+
+/* bad page timestamp format
+ * year[31:27] month[26:23] day[22:17] hour[16:12] minute[11:6] second[5:0]
+ */
+#define EEPROM_TIMESTAMP_MINUTE 6
+#define EEPROM_TIMESTAMP_HOUR 12
+#define EEPROM_TIMESTAMP_DAY 17
+#define EEPROM_TIMESTAMP_MONTH 23
+#define EEPROM_TIMESTAMP_YEAR 27
+
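+/* Pack the current UTC time into the 32-bit EEPROM timestamp format
+ * above. For example, 2025-06-15 12:34:56 encodes as
+ * (25 << 27) | (6 << 23) | (15 << 17) | (12 << 12) | (34 << 6) | 56
+ * = 0xCB1EC8B8.
+ */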
+static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core)
+{
+ struct ras_time tm = {0};
+ uint64_t utc_timestamp = 0;
+ uint64_t eeprom_timestamp = 0;
+
+ utc_timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ if (!utc_timestamp)
+ return utc_timestamp;
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm);
+
+ /* the supported year range is 2000 ~ 2031; clamp the year if it is outside */
+ if (tm.tm_year < 2000)
+ tm.tm_year = 2000;
+ if (tm.tm_year > 2031)
+ tm.tm_year = 2031;
+
+ tm.tm_year -= 2000;
+
+ eeprom_timestamp = tm.tm_sec + (tm.tm_min << EEPROM_TIMESTAMP_MINUTE)
+ + (tm.tm_hour << EEPROM_TIMESTAMP_HOUR)
+ + (tm.tm_mday << EEPROM_TIMESTAMP_DAY)
+ + (tm.tm_mon << EEPROM_TIMESTAMP_MONTH)
+ + (tm.tm_year << EEPROM_TIMESTAMP_YEAR);
+ eeprom_timestamp &= 0xffffffff;
+
+ return eeprom_timestamp;
+}
+
+static const struct ras_umc_ip_func *ras_umc_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
+ return &ras_umc_func_v12_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "UMC ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps)
+{
+ struct ras_ta_query_address_input addr_in;
+ struct ras_ta_query_address_output addr_out;
+ int ret;
+
+ if (!in)
+ return -EINVAL;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.ma.err_addr = in->err_addr;
+ addr_in.ma.ch_inst = in->ch_inst;
+ addr_in.ma.umc_inst = in->umc_inst;
+ addr_in.ma.node_inst = in->node_inst;
+ addr_in.ma.socket_id = in->socket_id;
+
+ addr_in.addr_type = RAS_TA_MCA_TO_PA;
+
+ ret = ras_psp_query_address(ras_core, &addr_in, &addr_out);
+ if (ret) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to query RAS physical address for 0x%llx, ret:%d",
+ in->err_addr, ret);
+ return -EREMOTEIO;
+ }
+
+ if (out) {
+ out->pa = addr_out.pa.pa;
+ out->bank = addr_out.pa.bank;
+ out->channel_idx = addr_out.pa.channel_idx;
+ }
+
+ return 0;
+}
+
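+/* Logged ECC records live in a radix tree keyed by the retired row PFN;
+ * freshly inserted entries are tagged UMC_ECC_NEW_DETECTED_TAG so the
+ * retirement path can pick up only the new ones.
+ */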
+static int ras_umc_log_ecc(struct ras_core_context *ras_core,
+ unsigned long idx, void *data)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret;
+
+ mutex_lock(&ras_umc->tree_lock);
+ ret = radix_tree_insert(&ras_umc->root, idx, data);
+ if (!ret)
+ radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG);
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return ret;
+}
+
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t buf[8] = {0};
+ void **slot;
+ void *data;
+ void *iter = buf;
+
+ mutex_lock(&ras_umc->tree_lock);
+ radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) {
+ data = ras_radix_tree_delete_iter(&ras_umc->root, iter);
+ kfree(data);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return 0;
+}
+
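+/* Expand an EEPROM record into the individual page PFNs it covers in
+ * the current NPS mode and ask the driver, via the event callback, to
+ * reserve each one.
+ */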
+static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t page_pfn[16];
+ int count = 0, i;
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) {
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Fail to convert error address! count:%d\n", count);
+ return;
+ }
+ }
+
+ /* Reserve memory */
+ for (i = 0; i < count; i++)
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]);
+}
+
+/* While a GPU reset is ongoing, ECC logging operations are queued on the
+ * pending list and replayed once the reset completes.
+ */
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node;
+
+ ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL);
+ if (!ecc_node)
+ return -ENOMEM;
+
+ memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc));
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list);
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+/* After gpu reset is complete, re-log the pending error banks.
+ */
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ if (!ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record umc_rec;
+ struct eeprom_umc_record *err_rec;
+ int ret;
+
+ memset(&umc_rec, 0, sizeof(umc_rec));
+
+ mutex_lock(&ras_umc->bank_log_lock);
+ ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec);
+ if (ret)
+ goto out;
+
+ err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL);
+ if (!err_rec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(err_rec, &umc_rec, sizeof(umc_rec));
+ ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec);
+ if (ret) {
+ if (ret == -EEXIST) {
+ RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n");
+ ret = 0;
+ }
+
+ kfree(err_rec);
+ goto out;
+ }
+
+ ras_umc_reserve_eeprom_record(ras_core, err_rec);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL);
+
+out:
+ mutex_unlock(&ras_umc->bank_log_lock);
+ return ret;
+}
+
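+/* Drain up to MAX_ECC_NUM_PER_RETIREMENT newly detected records from the
+ * radix tree, clearing the NEW_DETECTED tag on each so a record is only
+ * returned once.
+ */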
+static int ras_umc_get_new_records(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, u32 num)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ u32 entry_num = num < MAX_ECC_NUM_PER_RETIREMENT ? num : MAX_ECC_NUM_PER_RETIREMENT;
+ int count = 0;
+ int new_detected, i;
+
+ mutex_lock(&ras_umc->tree_lock);
+ new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries,
+ 0, entry_num, UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ memcpy(&records[i], entries[i], sizeof(struct eeprom_umc_record));
+ count++;
+ radix_tree_tag_clear(&ras_umc->root,
+ entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return count;
+}
+
+static bool ras_umc_check_retired_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t nps = 0;
+ int i, ret;
+
+ if (from_eeprom) {
+ nps = ras_umc->umc_err_data.umc_nps_mode;
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) {
+ ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps);
+ if (ret)
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to adjust eeprom record, ret:%d", ret);
+ }
+ return false;
+ }
+
+ for (i = 0; i < data->count; i++) {
+ if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) &&
+ (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn))
+ return true;
+ }
+
+ return false;
+}
+
+/* alloc/realloc the bps array; capacity grows in 512-entry-aligned chunks */
+static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core,
+ struct eeprom_store_record *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kzalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+
+ if (!bps)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(bps, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = bps;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn[16];
+ int count = 0, j;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages)
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+
+ if (count > 0) {
+ for (j = 0; j < count; j++) {
+ bps->cur_nps_retired_row_pfn = page_pfn[j];
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+ } else {
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+
+ return 0;
+}
+
+/* It deals with VRAM only. */
+static int ras_umc_add_bad_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps,
+ int pages, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *data = &ras_umc->umc_err_data;
+ int i, ret = 0;
+
+ if (!bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < pages; i++) {
+ if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom))
+ continue;
+
+ ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+
+ if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn)
+ continue;
+
+ data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn;
+
+ if (from_eeprom)
+ ras_umc_reserve_eeprom_record(ras_core, &bps[i]);
+
+ ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+ }
+out:
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+/*
+ * Read the error record array from EEPROM, reserve the retired pages,
+ * and make room for storing new bad pages.
+ */
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core)
+{
+ struct eeprom_umc_record *bps;
+ uint32_t ras_num_recs;
+ int ret;
+
+ ras_num_recs = ras_eeprom_get_record_count(ras_core);
+ /* no bad page record, skip eeprom access */
+ if (!ras_num_recs ||
+ ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE)
+ return 0;
+
+ bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ ret = ras_eeprom_read(ras_core, bps, ras_num_recs);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!");
+ } else {
+ ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN;
+ ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true);
+ }
+
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * Write the error record array to EEPROM; only entries that are not yet
+ * stored in EEPROM are appended.
+ */
+static int ras_umc_save_bad_pages(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t eeprom_record_num;
+ int save_count;
+ int ret = 0;
+
+ if (!data->bps)
+ return 0;
+
+ eeprom_record_num = ras_eeprom_get_record_count(ras_core);
+ mutex_lock(&ras_umc->umc_lock);
+ save_count = data->count - eeprom_record_num;
+ /* only new entries are saved */
+ if (save_count > 0) {
+ if (ras_eeprom_append(ras_core,
+ &data->bps[eeprom_record_num],
+ save_count)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!");
+ ret = -EIO;
+ goto exit;
+ }
+
+ RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ }
+
+exit:
+ mutex_unlock(&ras_umc->umc_lock);
+ return ret;
+}
+
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data)
+{
+ struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT];
+ int count, ret;
+
+ memset(records, 0, sizeof(records));
+ count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records));
+ if (count <= 0)
+ return -ENODATA;
+
+ ret = ras_umc_add_bad_pages(ras_core, records, count, false);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n");
+ return -EINVAL;
+ }
+
+ ret = ras_umc_save_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ras_umc_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+
+ memset(ras_umc, 0, sizeof(*ras_umc));
+
+ INIT_LIST_HEAD(&ras_umc->pending_ecc_list);
+
+ INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL);
+
+ mutex_init(&ras_umc->tree_lock);
+ mutex_init(&ras_umc->pending_ecc_lock);
+ mutex_init(&ras_umc->umc_lock);
+ mutex_init(&ras_umc->bank_log_lock);
+
+ return 0;
+}
+
+int ras_umc_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *umc_err_data = &ras_umc->umc_err_data;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_destroy(&ras_umc->umc_lock);
+ mutex_destroy(&ras_umc->bank_log_lock);
+
+ if (umc_err_data->rom_data.bps) {
+ umc_err_data->rom_data.count = 0;
+ kfree(umc_err_data->rom_data.bps);
+ umc_err_data->rom_data.bps = NULL;
+ umc_err_data->rom_data.space_left = 0;
+ }
+
+ if (umc_err_data->ram_data.bps) {
+ umc_err_data->ram_data.count = 0;
+ kfree(umc_err_data->ram_data.bps);
+ umc_err_data->ram_data.bps = NULL;
+ umc_err_data->ram_data.space_left = 0;
+ }
+
+ ras_umc_clear_logged_ecc(ras_core);
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ mutex_destroy(&ras_umc->tree_lock);
+ mutex_destroy(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint32_t nps;
+
+ nps = ras_core_get_curr_nps_mode(ras_core);
+
+ if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_err_data.umc_nps_mode = nps;
+
+ ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type;
+ if (!ras_umc->umc_vram_type) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n",
+ ras_umc->umc_vram_type);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_ip_version = ras_core->config->umc_ip_version;
+ ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version);
+ if (!ras_umc->ip_func)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ras_umc_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data;
+
+ mutex_lock(&ras_core->ras_umc.umc_lock);
+
+ kfree(data->rom_data.bps);
+ kfree(data->ram_data.bps);
+
+ memset(data, 0, sizeof(*data));
+ mutex_unlock(&ras_core->ras_umc.umc_lock);
+
+ return 0;
+}
+
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record)
+{
+ struct eeprom_umc_record *err_rec = record;
+
+ /* Set bad page pfn and nps mode */
+ EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec,
+ RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps);
+
+ err_rec->address = err_addr;
+ err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core);
+ err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = cur_nps_addr->channel_idx;
+ err_rec->mcumc_id = umc_inst;
+ err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa);
+ err_rec->cur_nps_bank = cur_nps_addr->bank;
+ err_rec->cur_nps = cur_nps;
+ return 0;
+}
+
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data;
+
+ return err_data->rom_data.count;
+}
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ return data->count;
+}
+
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ if (index >= data->count)
+ return -EINVAL;
+
+ memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record));
+ return 0;
+}
+
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn = RAS_ADDR_TO_PFN(addr);
+ int i, ret = false;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < data->count; i++) {
+ if (data->bps[i].cur_nps_retired_row_pfn == page_pfn) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret = 0;
+
+ if (bank_to_pa)
+ ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa);
+ else
+ ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
new file mode 100644
index 000000000000..7d9e779d8c4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_UMC_H__
+#define __RAS_UMC_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_cmd.h"
+
+#define UMC_VRAM_TYPE_UNKNOWN 0
+#define UMC_VRAM_TYPE_GDDR1 1
+#define UMC_VRAM_TYPE_DDR2 2
+#define UMC_VRAM_TYPE_GDDR3 3
+#define UMC_VRAM_TYPE_GDDR4 4
+#define UMC_VRAM_TYPE_GDDR5 5
+#define UMC_VRAM_TYPE_HBM 6
+#define UMC_VRAM_TYPE_DDR3 7
+#define UMC_VRAM_TYPE_DDR4 8
+#define UMC_VRAM_TYPE_GDDR6 9
+#define UMC_VRAM_TYPE_DDR5 10
+#define UMC_VRAM_TYPE_LPDDR4 11
+#define UMC_VRAM_TYPE_LPDDR5 12
+#define UMC_VRAM_TYPE_HBM3E 13
+
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
+#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFF)
+
+/* three column bits and one row bit in the MCA address are flipped
+ * during bad page retirement
+ */
+#define UMC_PA_FLIP_BITS_NUM 4
+
+enum umc_memory_partition_mode {
+ UMC_MEMORY_PARTITION_MODE_NONE = 0,
+ UMC_MEMORY_PARTITION_MODE_NPS1 = 1,
+ UMC_MEMORY_PARTITION_MODE_NPS2 = 2,
+ UMC_MEMORY_PARTITION_MODE_NPS3 = 3,
+ UMC_MEMORY_PARTITION_MODE_NPS4 = 4,
+ UMC_MEMORY_PARTITION_MODE_NPS6 = 6,
+ UMC_MEMORY_PARTITION_MODE_NPS8 = 8,
+ UMC_MEMORY_PARTITION_MODE_UNKNOWN
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+
+struct umc_flip_bits {
+ uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
+struct umc_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct umc_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct umc_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+};
+
+struct ras_umc_ip_func {
+ int (*bank_to_eeprom_record)(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record);
+ int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps);
+ int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num);
+ int (*bank_to_soc_pa)(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr, uint64_t *soc_pa);
+ int (*soc_pa_to_bank)(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct umc_bank_addr *bank_addr);
+};
+
+struct eeprom_store_record {
+ /* point to data records array */
+ struct eeprom_umc_record *bps;
+ /* the count of entries */
+ int count;
+ /* the space can place new entries */
+ int space_left;
+};
+
+struct ras_umc_err_data {
+ struct eeprom_store_record rom_data;
+ struct eeprom_store_record ram_data;
+ enum umc_memory_partition_mode umc_nps_mode;
+ uint64_t last_retired_pfn;
+};
+
+struct ras_umc {
+ u32 umc_ip_version;
+ u32 umc_vram_type;
+ const struct ras_umc_ip_func *ip_func;
+ struct radix_tree_root root;
+ struct mutex tree_lock;
+ struct mutex umc_lock;
+ struct mutex bank_log_lock;
+ struct mutex pending_ecc_lock;
+ struct ras_umc_err_data umc_err_data;
+ struct list_head pending_ecc_list;
+};
+
+int ras_umc_sw_init(struct ras_core_context *ras);
+int ras_umc_sw_fini(struct ras_core_context *ras);
+int ras_umc_hw_init(struct ras_core_context *ras);
+int ras_umc_hw_fini(struct ras_core_context *ras);
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps);
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data);
+int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank);
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank);
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core);
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core);
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core);
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core);
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core);
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record);
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr);
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
new file mode 100644
index 000000000000..5d9a11c17a86
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_core_status.h"
+#include "ras_umc_v12_0.h"
+
+#define NumDieInterleaved 4
+
+static const uint32_t umc_v12_0_channel_idx_tbl[]
+ [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
+ {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
+ {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
+ {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
+ {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
+ {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
+ {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
+ {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
+ {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
+};
+
+/* mapping of MCA error address to normalized address */
+static const uint32_t umc_v12_0_ma2na_mapping[] = {
+ 0, 5, 6, 8, 9, 14, 12, 13,
+ 10, 11, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 24, 7, 29, 30,
+};
+
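+/* XOR-reduce all 32 bits, i.e. compute the bit parity of val
+ * (equivalent to hweight32(val) & 1); used by the bank hash below.
+ */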
+static bool umc_v12_0_bit_wise_xor(uint32_t val)
+{
+ bool result = 0;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ result = result ^ ((val >> i) & 0x1);
+
+ return result;
+}
+
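+/* Select the physical-address bits that alias within one retired row.
+ * The set depends on the NPS partition mode and the HBM generation;
+ * anything unrecognized falls back to the NPS1 layout.
+ */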
+static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core,
+ enum umc_memory_partition_mode nps,
+ struct umc_flip_bits *flip_bits)
+{
+ uint32_t vram_type = ras_core->ras_umc.umc_vram_type;
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case UMC_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case UMC_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+}
+
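+/* Mask the flip bits out of a soc physical address to get the canonical
+ * row base address. If the caller does not accept a zero PFN, set one
+ * flip bit back so the resulting PFN is non-zero.
+ */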
+static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core,
+ uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok)
+{
+ struct umc_flip_bits flip_bits = {0};
+ uint64_t row_pa;
+ int i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = pa;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < flip_bits.bit_num; i++)
+ row_pa &= ~BIT_ULL(flip_bits.flip_bits_in_pa[i]);
+
+ if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa))
+ row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]);
+
+ return row_pa;
+}
+
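+/* Enumerate every page aliased to a retired row by walking all
+ * 2^bit_num combinations of the flip bits (16 pages with the four flip
+ * bits used here), optionally dumping each decoded
+ * row/column/bank/channel.
+ */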
+static int lookup_bad_pages_in_a_row(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num,
+ uint64_t seq_no, bool dump)
+{
+ uint32_t col, col_lower, row, row_lower, idx, row_high;
+ uint64_t soc_pa, row_pa, column, err_addr;
+ uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn);
+ struct umc_flip_bits flip_bits = {0};
+ uint32_t retire_unit;
+ uint32_t i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true);
+
+ err_addr = record->address;
+ /* get column bit 0 and 1 in mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(flip_bits.flip_row_bit);
+
+ if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) {
+ row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL;
+ /* Each channel holds 2.25GB; in the MCA-address-to-PA conversion,
+ * [R14 R13] is remapped when those two bits read 0x3, so take them
+ * from the PA instead of the MCA address.
+ */
+ row_lower |= (row_high << 13);
+ }
+
+ idx = 0;
+ row = 0;
+ retire_unit = 0x1 << flip_bits.bit_num;
+ /* loop for all possibilities of retire bits */
+ for (column = 0; column < retire_unit; column++) {
+ soc_pa = row_pa;
+ for (i = 0; i < flip_bits.bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+
+ /* add row bit 13 */
+ if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM)
+ row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower;
+
+ if (dump)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ seq_no, soc_pa, row, col,
+ record->cur_nps_bank, record->mem_channel);
+
+ if (pfns && (idx < num))
+ pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa);
+ }
+
+ return idx;
+}
+
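+/* Software fallback for MCA-to-physical-address translation when the
+ * RAS TA QUERY_ADDRESS command is unavailable: de-hash the bank bits,
+ * map the MCA address to a normalized address via
+ * umc_v12_0_ma2na_mapping, then fold in channel interleave and the NPS
+ * section offset.
+ */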
+static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ uint32_t i, na_shift;
+ uint64_t soc_pa, na, na_nps;
+ uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
+ uint32_t bank0, bank1, bank2, bank3, bank;
+ uint32_t ch_inst = addr_in->ch_inst;
+ uint32_t umc_inst = addr_in->umc_inst;
+ uint32_t node_inst = addr_in->node_inst;
+ uint32_t socket_id = addr_in->socket_id;
+ uint32_t channel_index;
+ uint64_t err_addr = addr_in->err_addr;
+
+ if (node_inst != UMC_INV_AID_NODE) {
+ if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM ||
+ umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM ||
+ node_inst >= UMC_V12_0_AID_NUM_MAX ||
+ socket_id >= UMC_V12_0_SOCKET_NUM_MAX)
+ return -EINVAL;
+ } else {
+ if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX ||
+ ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM)
+ return -EINVAL;
+ }
+
+ bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
+ bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
+ bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
+ bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
+ col = (err_addr >> 1) & 0x1fULL;
+ row = (err_addr >> 10) & 0x3fffULL;
+
+ /* apply bank hash algorithm */
+ bank0 =
+ bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
+ bank1 =
+ bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
+ bank2 =
+ bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
+ bank3 =
+ bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
+
+ bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
+ err_addr &= ~0x3c0ULL;
+ err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
+
+ na_nps = 0x0;
+ /* convert mca error address to normalized address */
+ for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
+ na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS1)
+ na_shift = 8;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ na_shift = 9;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ na_shift = 10;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8)
+ na_shift = 11;
+ else
+ return -EINVAL;
+
+ na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff);
+
+ if (node_inst != UMC_INV_AID_NODE)
+ channel_index =
+ umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst];
+ else {
+ channel_index = ch_inst;
+ node_inst = channel_index /
+ (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM);
+ }
+
+ /* translate the UMC channel address to a SoC PA; three parts are combined */
+ soc_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(na);
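+ /* illustrative example (hypothetical values): for na = 0x1234 and
+ * channel_index = 5, soc_pa = (0x1200 << 7) | (5 << 8) | 0x34 = 0x90534
+ */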
+
+ /* calculate the channel hash based on the absolute address */
+ soc_pa += socket_id * SOCKET_LFB_SIZE;
+ /* the UMC channel bits are hashed, not the original values */
+ UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
+ /* restore the per-socket pa */
+ soc_pa -= socket_id * SOCKET_LFB_SIZE;
+
+ /* take some channel bits directly from na_nps and
+ * add the nps section offset
+ */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT);
+ soc_pa |= ((na_nps & 0x100) << 5);
+ soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x300) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) {
+ soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x700) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) +
+ (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3);
+ }
+
+ addr_out->pa = soc_pa;
+ addr_out->bank = bank;
+ addr_out->channel_idx = channel_index;
+
+ return 0;
+}
+
+static int convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ int ret;
+
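+ /* prefer the PSP RAS TA address query when the command is supported,
+ * otherwise fall back to the software conversion implemented above
+ */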
+ if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS))
+ ret = ras_umc_psp_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+ else
+ ret = umc_v12_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+
+ return ret;
+}
+
+static int convert_bank_to_nps_addr(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps)
+{
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr);
+ addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid);
+ addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid);
+ addr_in.node_inst = ACA_IPID_2_DIE_ID(bank->ipid);
+ addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid);
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (!ret) {
+ pa_addr->pa =
+ convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+ pa_addr->channel_idx = addr_out.channel_idx;
+ pa_addr->bank = addr_out.bank;
+ }
+
+ return ret;
+}
+
+static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record)
+{
+ struct umc_phy_addr nps_addr;
+ int ret;
+
+ memset(&nps_addr, 0, sizeof(nps_addr));
+
+ ret = convert_bank_to_nps_addr(ras_core, bank,
+ &nps_addr, bank->nps);
+ if (ret)
+ return ret;
+
+ ras_umc_fill_eeprom_record(ras_core,
+ ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid),
+ &nps_addr, bank->nps, record);
+
+ lookup_bad_pages_in_a_row(ras_core, record,
+ bank->nps, NULL, 0, bank->seq_no, true);
+
+ return 0;
+}
+
+static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps)
+{
+ struct device_system_info dev_info = {0};
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ addr_in.err_addr = record->address;
+ addr_in.ch_inst = record->mem_channel;
+ addr_in.umc_inst = record->mcumc_id;
+ addr_in.node_inst = UMC_INV_AID_NODE;
+ addr_in.socket_id = dev_info.socket_id;
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (ret)
+ return ret;
+
+ *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+
+ return 0;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps)
+{
+ uint64_t pa = 0;
+ int ret = 0;
+
+ if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) {
+ record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record);
+ } else {
+ ret = convert_eeprom_record_to_nps_addr(ras_core,
+ record, &pa, nps);
+ if (!ret)
+ record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa);
+ }
+
+ record->cur_nps = nps;
+
+ return ret;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num)
+{
+ return lookup_bad_pages_in_a_row(ras_core,
+ record, nps, pfns, num, 0, false);
+}
+
+static int umc_v12_0_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa,
+ struct umc_bank_addr *bank_addr)
+{
+ int channel_hashed = 0;
+ int channel_real = 0;
+ int channel_reversed = 0;
+ int i = 0;
+
+ bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa);
+ bank_addr->bank_group = 0; /* combination of SID and bank; not derived here */
+ bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa);
+ bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa);
+ bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa);
+
+ /* Channel bits 4-6 are hashed; brute-force reverse the hash */
+ channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7;
+
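+ /* forward-hash all 8 candidates for channel bits [6:4]; the candidate
+ * that reproduces the hashed value is the real channel
+ */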
+ for (i = 0; i < 8; i++) {
+ channel_reversed = 0;
+ channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2);
+ if (channel_reversed == channel_hashed)
+ channel_real = ((i << 4)) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf);
+ }
+
+ bank_addr->channel = channel_real;
+ bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa);
+
+ return 0;
+}
+
+static int umc_v12_0_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr,
+ uint64_t *soc_pa)
+{
+ uint64_t na = 0;
+ uint64_t tmp_pa = 0;
+
+ *soc_pa = 0;
+
+ tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id);
+ tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank);
+ tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row);
+ tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column);
+ tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel);
+ tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel);
+
+ /* Get the NA */
+ na = ((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT);
+ na |= tmp_pa & 0xff;
+
+ /* translate the UMC channel address to a SoC PA; three parts are combined */
+ tmp_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(bank_addr.channel) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* the UMC channel bits are hashed, not the original values */
+ UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa);
+
+ *soc_pa = tmp_pa;
+
+ return 0;
+}
+
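+/* UMC v12.0 implementation of the ras_umc_ip_func callbacks */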
+const struct ras_umc_ip_func ras_umc_func_v12_0 = {
+ .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record,
+ .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record,
+ .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages,
+ .bank_to_soc_pa = umc_v12_0_bank_to_soc_pa,
+ .soc_pa_to_bank = umc_v12_0_soc_pa_to_bank,
+};
+
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
new file mode 100644
index 000000000000..8a35ad856165
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_UMC_V12_0_H__
+#define __RAS_UMC_V12_0_H__
+#include "ras.h"
+
+/* MCA_UMC_UMC0_MCUMC_ADDRT0 */
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L
+
+/* MCMP1_IPIDT0 */
+#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0
+#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20
+#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c
+#define MCMP1_IPIDT0__McaType__SHIFT 0x30
+
+#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL
+#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L
+#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L
+#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L
+
+/* number of UMC channel instances with memory-mapped register access */
+#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8
+/* number of UMC instances with memory-mapped register access */
+#define UMC_V12_0_UMC_INSTANCE_NUM 4
+
+/* one normalized address maps to 8 physical addresses */
+#define UMC_V12_0_NA_MAP_PA_NUM 8
+
+/* bank bits in MCA error address */
+#define UMC_V12_0_MCA_B0_BIT 6
+#define UMC_V12_0_MCA_B1_BIT 7
+#define UMC_V12_0_MCA_B2_BIT 8
+#define UMC_V12_0_MCA_B3_BIT 9
+
+/* row bits in MCA address */
+#define UMC_V12_0_MCA_R0_BIT 10
+
+/* Stack ID bits in SOC physical address */
+#define UMC_V12_0_PA_SID1_BIT 37
+#define UMC_V12_0_PA_SID0_BIT 36
+
+/* bank bits in SOC physical address */
+#define UMC_V12_0_PA_B3_BIT 18
+#define UMC_V12_0_PA_B2_BIT 17
+#define UMC_V12_0_PA_B1_BIT 20
+#define UMC_V12_0_PA_B0_BIT 19
+
+/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R13_BIT 35
+#define UMC_V12_0_PA_R12_BIT 34
+#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R10_BIT 32
+#define UMC_V12_0_PA_R9_BIT 31
+#define UMC_V12_0_PA_R8_BIT 30
+#define UMC_V12_0_PA_R7_BIT 29
+#define UMC_V12_0_PA_R6_BIT 28
+#define UMC_V12_0_PA_R5_BIT 27
+#define UMC_V12_0_PA_R4_BIT 26
+#define UMC_V12_0_PA_R3_BIT 25
+#define UMC_V12_0_PA_R2_BIT 24
+#define UMC_V12_0_PA_R1_BIT 23
+#define UMC_V12_0_PA_R0_BIT 22
+
+/* column bits in SOC physical address */
+#define UMC_V12_0_PA_C4_BIT 21
+#define UMC_V12_0_PA_C3_BIT 16
+#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C1_BIT 6
+#define UMC_V12_0_PA_C0_BIT 5
+
+/* channel index bits in SOC physical address */
+#define UMC_V12_0_PA_CH6_BIT 14
+#define UMC_V12_0_PA_CH5_BIT 13
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH3_BIT 11
+#define UMC_V12_0_PA_CH2_BIT 10
+#define UMC_V12_0_PA_CH1_BIT 9
+#define UMC_V12_0_PA_CH0_BIT 8
+
+/* Pseudochannel index bits in SOC physical address */
+#define UMC_V12_0_PA_PC0_BIT 7
+
+#define UMC_V12_0_NA_C2_BIT 8
+
+#define UMC_V12_0_SOC_PA_TO_SID(pa) \
+ ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL))
+
+#define UMC_V12_0_SOC_PA_TO_BANK(pa) \
+ ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL))
+
+#define UMC_V12_0_SOC_PA_TO_ROW(pa) \
+ ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \
+ (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \
+ (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 8ULL) | \
+ (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \
+ (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \
+ (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \
+ (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \
+ (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL))
+
+#define UMC_V12_0_SOC_PA_TO_COL(pa) \
+ ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL))
+
+#define UMC_V12_0_SOC_PA_TO_CH(pa) \
+ ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL))
+
+#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL)
+
+#define UMC_V12_0_SOC_SID_TO_PA(sid) \
+ ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \
+ (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT))
+
+#define UMC_V12_0_SOC_BANK_TO_PA(bank) \
+ ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \
+ (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \
+ (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \
+ (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT))
+
+#define UMC_V12_0_SOC_ROW_TO_PA(row) \
+ ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \
+ (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \
+ (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \
+ (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \
+ (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \
+ (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \
+ (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \
+ (((row >> 7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \
+ (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \
+ (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \
+ (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \
+ (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \
+ (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \
+ (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT))
+
+#define UMC_V12_0_SOC_COL_TO_PA(col) \
+ ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \
+ (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \
+ (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \
+ (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \
+ (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT))
+
+#define UMC_V12_0_SOC_CH_TO_PA(ch) \
+ ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \
+ (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \
+ (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \
+ (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \
+ (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \
+ (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \
+ (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT))
+
+#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT)
+
+/* bank hash settings */
+#define UMC_V12_0_XOR_EN0 1
+#define UMC_V12_0_XOR_EN1 1
+#define UMC_V12_0_XOR_EN2 1
+#define UMC_V12_0_XOR_EN3 1
+#define UMC_V12_0_COL_XOR0 0x0
+#define UMC_V12_0_COL_XOR1 0x0
+#define UMC_V12_0_COL_XOR2 0x800
+#define UMC_V12_0_COL_XOR3 0x1000
+#define UMC_V12_0_ROW_XOR0 0x11111
+#define UMC_V12_0_ROW_XOR1 0x22222
+#define UMC_V12_0_ROW_XOR2 0x4444
+#define UMC_V12_0_ROW_XOR3 0x8888
+
+/* channel hash settings */
+#define UMC_V12_0_HASH_4K 0
+#define UMC_V12_0_HASH_64K 1
+#define UMC_V12_0_HASH_2M 1
+#define UMC_V12_0_HASH_1G 1
+#define UMC_V12_0_HASH_1T 1
+
+/* XOR some bits of the PA into the CH4~CH6 bits (bits 12~14 of the PA);
+ * a hash bit only takes effect when the related setting is enabled
+ */
+#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
+ (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
+ (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
+ (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
+ (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
+ (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
+ } while (0)
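+
+/* note: the hash macros only tap PA bits 19 and above, so reading the
+ * partially rewritten pa inside UMC_V12_0_SET_CHANNEL_HASH (bits 12~14
+ * were just cleared) is safe
+ */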
+
+/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * are the index of the 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * are the index of the 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/*
+ * (addr / 256) * 32768, the higher 26 bits in ErrorAddr
+ * are the index of the 32KB block
+ */
+#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7)
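+/* the shifts implement (addr / 256) * block_size, e.g. for the 32KB case:
+ * ((addr) & ~0xffULL) << 7 == (addr / 256) * 256 * 128 == (addr / 256) * 32768
+ */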
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
+#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3)
+#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \
+ (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF))
+#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+#define ACA_IPID_2_UMC_CH(ipid) \
+ (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_UMC_INST(ipid) \
+ (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
+#define ACA_ADDR_2_ERR_ADDR(addr) \
+ REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr)
+
+/* the R13 bit flip must also be considered, so double the number */
+#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+
+/* four MCA bits (C2, C3, C4 and R13) are looped over in page retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
+
+/* invalid node instance value */
+#define UMC_INV_AID_NODE 0xffff
+
+#define UMC_V12_0_AID_NUM_MAX 4
+#define UMC_V12_0_SOCKET_NUM_MAX 8
+
+#define UMC_V12_0_TOTAL_CHANNEL_NUM \
+ (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM)
+
+/* one device has 192GB of HBM */
+#define SOCKET_LFB_SIZE 0x3000000000ULL
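+/* 0x3000000000 = 0x30 * 0x100000000 = 48 * 4GB = 192GB */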
+
+extern const struct ras_umc_ip_func ras_umc_func_v12_0;
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+#endif
+